""" |
|
Training script for baseline NanoGPT model on enwik8 dataset. |
|
Ensures proper bpc calculation and comparable evaluation with DTAT. |
|
""" |
|
|
|
import os |
|
import time |
|
import math |
|
import wandb |
|
import numpy as np |
|
from tqdm import tqdm |
|
|
|
import torch |
|
import torch.nn.functional as F |
|
from torch.nn import CrossEntropyLoss |
|
|
|
from model_baseline import BaselineTransformer |
|
from config.baseline_config import get_config |
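

# Cross-entropy from the model is assumed to be in nats (natural log), as in
# nanoGPT; enwik8 results are conventionally reported in bits per character
# (bpc), so the conversion below is used whenever a loss is logged as bpc.
def nats_to_bpc(loss_nats):
    """Convert a cross-entropy value from nats to bits per character (bpc = nats / ln 2)."""
    return loss_nats / math.log(2)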


def get_batch(data, block_size, batch_size, device):
    """Generate a small batch of input sequences x and shifted targets y."""
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy(data[i:i + block_size].astype(np.int64)) for i in ix])
    y = torch.stack([torch.from_numpy(data[i + 1:i + 1 + block_size].astype(np.int64)) for i in ix])
    x, y = x.to(device), y.to(device)
    return x, y
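
# Example of the sampling above (hypothetical bytes, block_size=4): if the
# memmap contains [116, 104, 101, 32, 113, ...] and offset i=0 is drawn, then
# x = [116, 104, 101, 32] and y = [104, 101, 32, 113]; y is x shifted one byte
# to the right, giving the next-character prediction target for every position.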


def estimate_loss(model, data, config):
    """Estimate the mean cross-entropy (in nats) over config.eval_iters random batches."""
    model.eval()
    total_loss = 0.0
    total_steps = config.eval_iters

    with torch.no_grad():
        for _ in range(total_steps):
            X, Y = get_batch(data, config.block_size, config.batch_size, config.device)
            with torch.amp.autocast('cuda', enabled=config.mixed_precision):
                logits, loss = model(X, Y)
            total_loss += loss.item()

    model.train()
    return total_loss / total_steps


def get_lr(it, config):
    """
    Learning rate schedule: linear warmup followed by cosine decay.
    Matches DTAT's scheduler exactly.
    """
    # Linear warmup from 0 to the base learning rate.
    if it < config.warmup_iters:
        return config.learning_rate * it / config.warmup_iters

    # Cosine decay from learning_rate down to min_lr over lr_decay_iters.
    if config.decay_lr:
        decay_ratio = (it - config.warmup_iters) / (config.lr_decay_iters - config.warmup_iters)
        decay_ratio = min(decay_ratio, 1.0)
        coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # goes from 1 to 0
        return config.min_lr + coeff * (config.learning_rate - config.min_lr)

    return config.learning_rate
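
# Worked example with hypothetical settings (not the values in baseline_config):
# learning_rate=6e-4, min_lr=6e-5, warmup_iters=200, lr_decay_iters=10000.
# get_lr ramps linearly from 0 to 6e-4 over the first 200 iterations, then
# follows a half-cosine down to 6e-5 at iteration 10000 and stays there.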


def main():
    config = get_config()
    device = config.device

    # Experiment tracking.
    wandb.init(project="enwik8-baseline", name="baseline-run")
    wandb.config.update(config.__dict__)

    # Enable TF32 matmuls/convolutions and cuDNN autotuning for throughput.
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    torch.backends.cudnn.benchmark = config.cudnn_benchmark

    # enwik8 is byte-level, so the .bin files are read as uint8 memmaps.
    print("Loading data...")
    data_dir = 'data'
    train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint8, mode='r')
    val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint8, mode='r')

    print("Initializing model...")
    model = BaselineTransformer(config).to(device)
    print(f"number of parameters: {model.get_num_params()/1e6:.2f}M")

    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config.learning_rate,
        betas=(config.beta1, config.beta2),
        weight_decay=config.weight_decay,
    )

    # Gradient scaler for mixed-precision training.
    scaler = torch.amp.GradScaler('cuda', enabled=config.mixed_precision)

    if config.gradient_checkpointing:
        model.gradient_checkpointing_enable()

    # Rough epoch count implied by the iteration budget (one token = one byte).
    total_steps = config.max_iters
    batch_size = config.batch_size
    block_size = config.block_size
    total_epochs = (total_steps * batch_size * block_size) // len(train_data)
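
    # For instance (hypothetical numbers, not the actual config): with
    # batch_size=32, block_size=512, max_iters=100_000 and a 90M-byte training
    # split, total_steps * batch_size * block_size = 1.6384e9 bytes processed,
    # i.e. roughly 18 passes over the training data.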

    pbar = tqdm(range(config.max_iters), desc=f"Training (0/{total_epochs} epochs)")

    best_val_loss = float('inf')
    no_improvement = 0
    t0 = time.time()

    for iter_num in pbar:
        # Early stopping on validation loss.
        if no_improvement >= config.patience:
            print(f"\nEarly stopping triggered after {iter_num} iterations")
            print(f"Best validation loss: {best_val_loss:.4f}")
            break

        # Set the learning rate for this iteration.
        lr = get_lr(iter_num, config)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        # Sample a training batch and run the forward pass under autocast.
        X, Y = get_batch(train_data, config.block_size, config.batch_size, device)
        with torch.amp.autocast('cuda', enabled=config.mixed_precision):
            logits, loss = model(X, Y)

        # Backward pass with gradient scaling and clipping.
        optimizer.zero_grad(set_to_none=True)
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        scaler.step(optimizer)
        scaler.update()
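
        # Note on the update order above: the loss is scaled before backward so
        # small fp16 gradients don't underflow, the gradients are unscaled back
        # to their true magnitudes before clip_grad_norm_ (so config.grad_clip
        # applies in real units), and scaler.step/update then perform the
        # optimizer step and adjust the scale factor.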

        # Periodic logging of training metrics.
        if iter_num % config.log_interval == 0:
            current_tokens = (iter_num + 1) * batch_size * block_size
            current_epoch = current_tokens / len(train_data)
            train_bpc = nats_to_bpc(loss.item())
            tokens_per_sec = current_tokens / (time.time() - t0)

            pbar.set_description(
                f"Training ({current_epoch:.1f}/{total_epochs} epochs) | "
                f"loss: {loss.item():.4f} | "
                f"bpc: {train_bpc:.2f} | "
                f"lr: {lr:.1e} | "
                f"tokens/sec: {tokens_per_sec:.1f}"
            )

            wandb.log({
                "iter": iter_num,
                "epoch": current_epoch,
                "train/loss": loss.item(),
                "train/bpc": train_bpc,
                "lr": lr,
                "tokens_per_sec": tokens_per_sec,
            })

        # Periodic validation, checkpointing of the best model, and
        # early-stopping bookkeeping.
        if iter_num > 0 and iter_num % 100 == 0:
            val_loss = estimate_loss(model, val_data, config)
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                no_improvement = 0
                print(f"\nSaving best model with val_loss: {best_val_loss:.4f}")
                torch.save(model.state_dict(), os.path.join(os.path.dirname(__file__), 'best_baseline.pt'))
            else:
                no_improvement += 1

            wandb.log({
                "val/loss": val_loss,
                "val/bpc": nats_to_bpc(val_loss),
                "lr": lr,
            })

    wandb.finish()


if __name__ == '__main__':
    main()