Enable project approvals
Files changed:
- app.py (+4, -2)
- requirements.txt (+1, -0)
- run_evaluation_jobs.py (+44, -0)
app.py
CHANGED
@@ -510,8 +510,8 @@ with st.form(key="form"):
         ).json()
         print(f"INFO -- Dataset creation response: {data_json_resp}")
         if data_json_resp["download_status"] == 1:
-            train_json_resp =
-            path=f"/projects/{project_json_resp['id']}/data/
+            train_json_resp = http_post(
+                path=f"/projects/{project_json_resp['id']}/data/start_processing",
                 token=HF_TOKEN,
                 domain=AUTOTRAIN_BACKEND_API,
             ).json()
@@ -548,6 +548,8 @@ with st.form(key="form"):
             )
             print("INFO -- Pushing evaluation job logs to the Hub")
             evaluation_log = {}
+            evaluation_log["project_id"] = project_json_resp["id"]
+            evaluation_log["is_evaluated"] = False
             evaluation_log["payload"] = project_payload
             evaluation_log["project_creation_response"] = project_json_resp
             evaluation_log["dataset_creation_response"] = data_json_resp
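The two new evaluation_log fields (project_id and is_evaluated) are what the approval script keys on: it loads the evaluation-job-logs dataset and keeps only rows that carry a project id but are not yet marked as evaluated. A minimal sketch of that filter; the DataFrame here is illustrative, while the column names and the filter expression are taken from run_evaluation_jobs.py below:

import pandas as pd

# Illustrative records; real ones come from the autoevaluate/evaluation-job-logs dataset
logs_df = pd.DataFrame(
    [
        {"project_id": 101, "is_evaluated": False},
        {"project_id": None, "is_evaluated": False},  # older log entry without a project id
        {"project_id": 102, "is_evaluated": True},
    ]
)

# Same filter as in run_evaluation_jobs.py: unevaluated logs that do have a project id
pending = logs_df[(~logs_df["project_id"].isnull()) & (logs_df["is_evaluated"] == False)]
print(pending["project_id"].astype(int).tolist())  # [101]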
requirements.txt
CHANGED
@@ -4,6 +4,7 @@ streamlit==1.10.0
 datasets<2.3
 evaluate<0.2
 jsonlines
+typer[rich]
 # Dataset specific deps
 py7zr<0.19
 openpyxl<3.1
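The typer[rich] extra is pulled in for the new run_evaluation_jobs.py below: typer provides the command-line entry point (typer.run(main)) and the rich extra supplies the print the script imports from rich. As a reminder of the pattern, typer turns a plain function into a CLI; the --dry-run flag in this sketch is purely illustrative and not part of the actual script:

import typer

def main(dry_run: bool = False):
    # Function parameters become CLI options, e.g. `python script.py --dry-run`
    typer.echo(f"dry_run={dry_run}")

if __name__ == "__main__":
    typer.run(main)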
run_evaluation_jobs.py
ADDED
@@ -0,0 +1,44 @@
+import os
+from pathlib import Path
+
+import typer
+from datasets import load_dataset
+from dotenv import load_dotenv
+from rich import print
+
+from utils import http_get, http_post
+
+if Path(".env").is_file():
+    load_dotenv(".env")
+
+HF_TOKEN = os.getenv("HF_TOKEN")
+AUTOTRAIN_TOKEN = os.getenv("AUTOTRAIN_TOKEN")
+AUTOTRAIN_USERNAME = os.getenv("AUTOTRAIN_USERNAME")
+AUTOTRAIN_BACKEND_API = os.getenv("AUTOTRAIN_BACKEND_API")
+
+
+def main():
+    logs_df = load_dataset("autoevaluate/evaluation-job-logs", use_auth_token=True, split="train").to_pandas()
+    evaluated_projects_ds = load_dataset("autoevaluate/evaluated-project-ids", use_auth_token=True, split="train")
+    projects_df = logs_df.copy()[(~logs_df["project_id"].isnull()) & (logs_df["is_evaluated"] == False)]
+    projects_to_approve = projects_df["project_id"].astype(int).tolist()
+
+    for project_id in projects_to_approve:
+        project_status = http_get(
+            path=f"/projects/{project_id}",
+            token=HF_TOKEN,
+            domain=AUTOTRAIN_BACKEND_API,
+        ).json()
+        if project_status["status"] == 3:
+            train_job_resp = http_post(
+                path=f"/projects/{project_id}/start_training",
+                token=HF_TOKEN,
+                domain=AUTOTRAIN_BACKEND_API,
+            ).json()
+            print(f"🏃‍♂️ Project {project_id} approval response: {train_job_resp}")
+            # if train_job_resp["approved"] == True:
+            #     # Update evaluation status
+
+
+if __name__ == "__main__":
+    typer.run(main)
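Running the script presumably only requires the four variables read at the top of the file, either exported in the environment or dropped into a local .env that load_dotenv picks up; the values below are placeholders, not real tokens or endpoints:

HF_TOKEN=<Hub token, presumably with read access to the autoevaluate log datasets>
AUTOTRAIN_TOKEN=<AutoTrain API token>
AUTOTRAIN_USERNAME=<AutoTrain username>
AUTOTRAIN_BACKEND_API=<AutoTrain backend URL>

With those in place, python run_evaluation_jobs.py fetches the job logs, approves every pending project whose AutoTrain status allows it (status == 3), and prints each response; the commented-out lines at the end suggest that marking projects as evaluated is left as a follow-up.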