import os

import numpy as np
import torch
import torch.optim as optim
import wandb
from accelerate import Accelerator
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    get_scheduler,
)
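# Accelerator handles device placement and (optional) distributed training setup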
acc = Accelerator()
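# LoRA config: rank-8 adapters (alpha 64) attached to every linear layer of the base model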
lora_conf = LoraConfig(
    r=8,
    lora_alpha=64,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules="all-linear",
    modules_to_save=None,
)
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
dataset_id = "microsoft/orca-math-word-problems-200k"
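# Load the base model in bf16 with Flash Attention 2; the KV cache is disabled since we are training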
model_kwargs = dict(
    use_cache=False,
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
    device_map="sequential",
)
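# Load tokenizer and base model, then wrap the model with the LoRA adapters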
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
model = get_peft_model(model, lora_conf)
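# Count only parameters with requires_grad=True, i.e. the LoRA adapter weights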
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
trainable_params = format(count_trainable_parameters(model), ",")
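# Training hyperparameters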
epochs = 1
per_dev_batch_size = 2
gradient_accumulation_steps = 4
dtype = torch.bfloat16
learning_rate = 1e-5
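# The dataset provides question/answer pairs of math word problems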
raw_dataset = load_dataset(dataset_id, split="train")
def apply_chat_template(example, tokenizer):
    chat = [
        {"role": "user", "content": example["question"]},
        {"role": "assistant", "content": example["answer"]},
    ]
    # tokenize=True, so 'text' holds token ids rather than a string; sequences are not truncated here
    example['text'] = tokenizer.apply_chat_template(chat, add_generation_prompt=False, tokenize=True)
    return example
train_dataset = raw_dataset.select(range(150000))
# Hold out examples outside the training slice for evaluation
test_dataset = raw_dataset.select(range(150000, 150300))
column_names = list(train_dataset.features)
processed_train_dataset = train_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to train_sft",
)
processed_test_dataset = test_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to test_sft",
)
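# Causal-LM collator (mlm=False): pads each batch and copies input_ids to labels, masking the padding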
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
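# DataLoaders iterate over the pre-tokenized 'text' column; the collator handles per-batch padding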
train_dataloader = torch.utils.data.DataLoader(
    processed_train_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=True,
    collate_fn=data_collator,
)
test_dataloader = torch.utils.data.DataLoader(
    processed_test_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=True,
    collate_fn=data_collator,
)
global_step = 0
# The scheduler steps once per optimizer update, so count training steps in optimizer steps
num_training_steps = epochs * len(train_dataloader) // gradient_accumulation_steps
warmup_ratio = 0.1
warmup_steps = int(warmup_ratio * num_training_steps)
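# AdamW over all parameters; frozen base weights never receive gradients, so only the LoRA weights are updated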
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
scheduler = get_scheduler(
    name="cosine",
    optimizer=optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=num_training_steps,
)
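# Track the run and its hyperparameters in Weights & Biases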
wandb.init(
    project="math-tiny-llama",
    config={
        "learning_rate": learning_rate,
        "dataset": dataset_id,
        "batch_size": per_dev_batch_size,
        "lora_r": lora_conf.r,
        "lora_alpha": lora_conf.lora_alpha,
        "lora_dropout": lora_conf.lora_dropout,
        "gradient_accumulation_steps": gradient_accumulation_steps,
        "warmup_ratio": warmup_ratio,
        "trainable_params": trainable_params,
        "num_training_steps": num_training_steps,
        "model_name": "TinyLlama",
    },
)
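# Let accelerate wrap the model, optimizer, scheduler, and both dataloaders (handles device placement)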
model, optimizer, scheduler, train_dataloader, test_dataloader = acc.prepare(
    model, optimizer, scheduler, train_dataloader, test_dataloader
)
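# Evaluate on the held-out batches and log the mean loss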
def calc_metrics():
    model.eval()
    losses = []
    with torch.no_grad():
        for batch in test_dataloader:
            pred = model(**batch)
            losses.append(pred.loss.item())
    wandb.log({"eval_loss": sum(losses) / len(losses)})
    model.train()
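# Training loop with manual gradient accumulation: the optimizer and scheduler step every gradient_accumulation_steps batches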
model.train()
for epoch in range(epochs):
    for step, batch in enumerate(train_dataloader):
        outputs = model(**batch)
        # Scale the loss so accumulated gradients average over the accumulation window
        loss = outputs.loss / gradient_accumulation_steps
        acc.backward(loss)
        wandb.log({"loss": outputs.loss.item(), "learning_rate": optimizer.param_groups[0]['lr']})
        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            global_step += 1
        if (step + 1) % 100 == 0:
            print(f"Loss: {outputs.loss.item()}")
        if (step + 1) % 400 == 0:
            calc_metrics()
        if global_step > num_training_steps:
            break
    if global_step > num_training_steps:
        break
wandb.finish()
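# Save only the LoRA adapter weights for the final checkpoint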
save_path = os.path.join("checkpoint_2_", f"step_{global_step}")
acc.unwrap_model(model).save_pretrained(save_path)
print("Saved model")