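# Fine-tune Qwen2-1.5B-Instruct with LoRA on the orca-math word-problem dataset,
# using Accelerate for device placement / gradient accumulation and Weights & Biases for logging.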
import os

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import tqdm
import wandb
from accelerate import Accelerator
from datasets import load_dataset
from huggingface_hub import HfApi, notebook_login
from peft import LoraConfig, LoraModel, get_peft_model
from timm.scheduler import CosineLRScheduler
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
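# LoRA adapter config: rank-8 adapters with alpha 32 applied to every linear layer of the base model.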
lora_conf = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules="all-linear",
    modules_to_save=None,
)
model_id = "Qwen/Qwen2-1.5B-Instruct"
dataset_id = "HuggingFaceH4/orca-math-word-problems-200k"
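# Base-model kwargs: disable the KV cache for training, let HF pick the dtype,
# and fill the available devices in order ("sequential").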
model_kwargs = dict(
    use_cache=False,
    # attn_implementation="flash_attention_2",
    torch_dtype="auto",
    device_map="sequential",
)
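# Load the tokenizer and base model, then wrap the model with the LoRA adapters.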
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.model_max_length = 2048
model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
model = get_peft_model(model, lora_conf)
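# Count the parameters that will actually be updated (the LoRA weights);
# PEFT's model.print_trainable_parameters() reports the same information.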
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params

trainable_params = format(count_trainable_parameters(model), ",")
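# Training hyperparameters; with a per-device batch size of 1 and 20 accumulation steps,
# each optimizer update sees an effective batch of 20 sequences.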
epochs = 1
per_dev_batch_size = 1
gradient_accumulation_steps = 20
dtype = torch.bfloat16
learning_rate = 1e-4
train_dataset = load_dataset(dataset_id, split="train_sft").select(range(150000))
test_dataset = load_dataset(dataset_id, split="test_sft").select(range(100))
# def apply_chat_template(example, tokenizer):
# chat = []
# convo = example['conversations']
# for dic in convo:
# if dic['from'] == 'human':
# chat = [
# {"role": "user", "content": dic['value']},
# {"role": "assistant", "content": "skibbidy"}
# ]
# example['text'] = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=False, truncation=True)
# return example
# train_dataset = raw_dataset.select(range(10000))
# test_dataset = raw_dataset.select(range(300))
# column_names = list(train_dataset.features)
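# Tokenize each conversation with the model's chat template; the resulting token ids
# are stored in the 'text' column and all original columns are dropped during .map().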
def apply_chat_template(example, tokenizer):
    example['text'] = tokenizer.apply_chat_template(
        example['messages'], tokenize=True, add_generation_prompt=False, truncation=True
    )
    return example
column_names = list(train_dataset.features)
processed_train_dataset = train_dataset.map(
    apply_chat_template,
    # batched=True,
    # batch_size=20,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
)
processed_test_dataset = test_dataset.map(
    apply_chat_template,
    # batched=True,
    # batch_size=20,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
)
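# With mlm=False the collator pads each batch and copies the input ids to labels
# (padding positions masked with -100); the causal shift happens inside the model.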
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

train_dataloader = torch.utils.data.DataLoader(
    processed_train_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=False,
    collate_fn=data_collator,
)
test_dataloader = torch.utils.data.DataLoader(
    processed_test_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=False,
    collate_fn=data_collator,
)
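# One epoch over the data with a cosine learning-rate schedule and a fixed 500-step warmup
# (the ratio-based warmup is left commented out below).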
global_step = 0
num_training_steps = epochs * len(train_dataloader)
warmup_ratio = 0.1
warmup_steps = 500
#warmup_steps = int(warmup_ratio * num_training_steps)
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
cross_entropy = nn.CrossEntropyLoss()
scheduler = get_scheduler(
    name="cosine",
    optimizer=optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=num_training_steps,
)
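# Accelerate handles device placement and gradient accumulation; W&B runs only on the main process.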
acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)

if acc.is_main_process:
    wandb.init(
        project="qwen-math",
        config={
            "learning_rate": learning_rate,
            "dataset": dataset_id,
            "batch_size": per_dev_batch_size,
            "lora_r": lora_conf.r,
            "lora_alpha": lora_conf.lora_alpha,
            "lora_dropout": lora_conf.lora_dropout,
            "gradient_accumulation_steps": gradient_accumulation_steps,
            "warmup_ratio": warmup_ratio,
            "trainable_params": trainable_params,
            "num_training_steps": num_training_steps,
            "model_name": model_id,
        },
    )
# Only objects Accelerate knows how to wrap go through prepare(); the tokenizer does not,
# and the eval dataloader is included so its batches land on the right device.
model, optimizer, train_dataloader, test_dataloader, scheduler = acc.prepare(
    model, optimizer, train_dataloader, test_dataloader, scheduler
)
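# Checkpointing and evaluation helpers.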
def save_checkpoint():
    if acc.is_main_process:
        save_path = os.path.join("checkpoint_math", f"step_{global_step}")
        # unwrap_model works whether or not the model has been wrapped (e.g. in DDP).
        acc.unwrap_model(model).save_pretrained(save_path)
        print(f"Saved model at step {global_step}")
def calc_metrics():
    model.eval()
    with torch.no_grad():
        for batch in test_dataloader:
            pred = model(**batch)
            loss = pred.loss
            if acc.is_main_process:
                perplexity = torch.exp(loss)
                wandb.log({"eval_loss": loss.item(), "eval_perplexity": perplexity.item()})
    model.train()
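# Training loop: forward/backward under acc.accumulate, checkpoint every 1000 steps,
# print the loss every 100 steps, and run eval every 400 steps.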
device = acc.device
model.train()

for epoch in range(epochs):
    for step, batch in enumerate(train_dataloader):
        # print(tokenizer.decode(batch['input_ids'][0]))
        with acc.accumulate(model):
            outputs = model(**batch)
            loss = outputs.loss
            acc.backward(loss)
            # Under accumulate(), Accelerate only performs the real optimizer update
            # every gradient_accumulation_steps micro-batches.
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

        if acc.is_main_process:
            perplexity = torch.exp(loss)
            wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity.item()})

        global_step += 1

        if (step + 1) % 1000 == 0:
            save_checkpoint()
        if (step + 1) % 100 == 0 and acc.is_main_process:
            print(f"Loss: {loss.item()}")
        if (step + 1) % 400 == 0:
            calc_metrics()
        if global_step > num_training_steps:
            break
    if global_step > num_training_steps:
        break
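# Close out the W&B run and write a final checkpoint of the adapter weights.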
if acc.is_main_process:
    wandb.finish()

save_checkpoint()