import os

import wandb
from fastapi import FastAPI
from huggingface_hub import HfApi

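# Authenticate with the Hugging Face Hub and Weights & Biases using tokens
# passed in as environment variables.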
TOKEN = os.environ.get("DATACOMP_TOKEN")
API = HfApi(token=TOKEN)
wandb_api_key = os.environ.get("wandb_api_key")
wandb.login(key=wandb_api_key)

EXPERIMENT = "imagenet-1k-random-20.0-frac-1over2"
# Input dataset
INPUT = f"datacomp/{EXPERIMENT}"
# Output for files and Space ID
OUTPUT = f"datacomp/ImagenetTraining-{EXPERIMENT}"

app = FastAPI()

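# Runs the whole pipeline: create the output dataset repo, set CUDA-related
# environment variables, launch training, and upload the results.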
def start_train():
    os.system("echo 'Space started!'")
os.system("echo pwd") | |
os.system("pwd") | |
os.system("echo ls") | |
os.system("ls") | |
os.system("echo 'creating dataset for output files if it doesn't exist...'") | |
    try:
        API.create_repo(repo_id=OUTPUT, repo_type="dataset")
    except Exception:
        # The output repo may already exist; ignore the error if so.
        pass
    #space_variables = API.get_space_variables(repo_id=SPACE_ID)
    #if 'STATUS' not in space_variables or space_variables['STATUS'] != 'COMPUTING':
    os.system("echo 'Beginning processing.'")
    # API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='COMPUTING')
    # Set in the parent process so the training subprocess inherits them.
    # Helps avoid CUDA OOM errors caused by memory fragmentation.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
    # Makes CUDA errors synchronous, so tracebacks point at the failing call.
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
os.system("echo 'Okay, trying training.'") | |
os.system(f"cd pytorch-image-models; ./train.sh 4 --dataset hfds/{INPUT} --log-wandb --experiment {EXPERIMENT} --model seresnet34 --sched cosine --epochs 150 --warmup-epochs 5 --lr 0.4 --reprob 0.5 --remode pixel --batch-size 256 --amp -j 4") | |
os.system("echo ls") | |
os.system("ls") | |
os.system("echo 'trying to upload...'") | |
    API.upload_large_folder(folder_path="/app", repo_id=OUTPUT, repo_type="dataset")
    #API.add_space_variable(repo_id=SPACE_ID, key='STATUS', value='NOT_COMPUTING')
    #API.pause_space(SPACE_ID)
    return {"Completed": "!"}