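# LoRA fine-tuning of Qwen/Qwen2-1.5B-Instruct on a ShareGPT-style chat dataset,
# using Hugging Face PEFT and Accelerate with Weights & Biases logging.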
from transformers import AutoTokenizer, AutoModelForCausalLM, DataCollatorForLanguageModeling, get_scheduler
from huggingface_hub import HfApi, notebook_login
from datasets import load_dataset
from peft import LoraConfig, LoraModel, get_peft_model
from timm.scheduler import CosineLRScheduler
import wandb
import os
from accelerate import Accelerator
import numpy as np
import torch
import tqdm
import torch.nn as nn
import torch.optim as optim
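# LoRA configuration: rank-8 adapters, alpha 32, 5% dropout, applied to all linear layers.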
lora_conf = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules="all-linear",
    modules_to_save=None,
)
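# Load the tokenizer and base model, cap sequences at 4096 tokens, and wrap the model with the LoRA adapters.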
model_id = "Qwen/Qwen2-1.5B-Instruct"
model_kwargs = dict(
    use_cache=False,
    # attn_implementation="flash_attention_2",
    torch_dtype="auto",
    device_map="sequential",
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.model_max_length = 4096
model = AutoModelForCausalLM.from_pretrained(model_id, **model_kwargs)
model = get_peft_model(model, lora_conf)
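# Count parameters that require gradients (only the LoRA adapters are trainable).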
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
trainable_params = format(count_trainable_parameters(model), ",")
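# Core hyperparameters; the effective batch size is per_dev_batch_size * gradient_accumulation_steps per device.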
epochs = 2
per_dev_batch_size = 2
gradient_accumulation_steps = 20
dtype = torch.bfloat16
learning_rate = 1e-4
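# Convert ShareGPT-style records ({'from': ..., 'value': ...}) to the tokenizer's chat format and tokenize in one pass.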
def apply_chat_template(example, tokenizer):
    convo = example['conversations']
    for dic in convo:
        dic['role'] = dic.pop('from')
        dic['content'] = dic.pop('value')
        if dic['role'] == 'gpt':
            dic['role'] = 'assistant'
        elif dic['role'] == 'human':
            dic['role'] = 'user'
    example['text'] = tokenizer.apply_chat_template(convo, tokenize=True, add_generation_prompt=False, truncation=True)
    return example
dataset = load_dataset("your/sharegpt-style-dataset", split="train")  # placeholder; the dataset name is not given in the original
train_dataset = dataset.select(range(98000))
test_dataset = dataset.select(range(98000, min(len(dataset), 101000)))  # held-out slice; the original range(3000) overlapped the train split
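# Apply the chat template to both splits in parallel and drop the raw columns.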
column_names = list(train_dataset.features)
processed_train_dataset = train_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
)
processed_test_dataset = test_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
)
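# Causal-LM collator (mlm=False) pads batches of token ids for the train/test dataloaders.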
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
train_dataloader = torch.utils.data.DataLoader(
    processed_train_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=True,
    collate_fn=data_collator,
)
test_dataloader = torch.utils.data.DataLoader(
    processed_test_dataset['text'],
    batch_size=per_dev_batch_size,
    shuffle=True,
    collate_fn=data_collator,
)
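# Step budget, warmup, AdamW optimizer, and cosine LR schedule
# (the model returns its own loss, so cross_entropy below goes unused).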
global_step = 0
num_training_steps = epochs * len(train_dataloader)
warmup_ratio = 0.1
warmup_steps = 800
#warmup_steps = int(warmup_ratio * num_training_steps)
optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
cross_entropy = nn.CrossEntropyLoss()
scheduler = get_scheduler(
    name="cosine",
    optimizer=optimizer,
    num_warmup_steps=warmup_steps,
    num_training_steps=num_training_steps,
)
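# One Accelerator handles gradient accumulation; W&B logging is initialized on the main process only.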
acc = Accelerator(gradient_accumulation_steps=gradient_accumulation_steps)
if acc.is_main_process:
    wandb.init(
        project="qwen-hus-inst",
        config={
            "learning_rate": learning_rate,
            "dataset": "Mix of RP and Instruct",
            "batch_size": per_dev_batch_size,
            "lora_r": lora_conf.r,
            "lora_alpha": lora_conf.lora_alpha,
            "lora_dropout": lora_conf.lora_dropout,
            "gradient_accumulation_steps": gradient_accumulation_steps,
            "warmup_ratio": warmup_ratio,
            "trainable_params": trainable_params,
            "num_training_steps": num_training_steps,
            "model_name": model_id,
        },
    )
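# Let Accelerate move the model, optimizer, dataloaders, and scheduler to the right devices.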
model, optimizer, train_dataloader, test_dataloader, scheduler = acc.prepare(
    model, optimizer, train_dataloader, test_dataloader, scheduler
)
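# Helpers: save LoRA checkpoints on the main process and run a quick evaluation pass over the held-out set.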
def save_checkpoint():
    if acc.is_main_process:
        save_path = os.path.join("checkpoint_hus", f"step_{global_step}")
        acc.unwrap_model(model).save_pretrained(save_path)
        print(f"Saved model at step {global_step}")
def calc_metrics():
    model.eval()
    losses = []
    with torch.no_grad():
        for batch in test_dataloader:
            pred = model(**batch)
            losses.append(pred.loss.detach().float())
    if acc.is_main_process:
        eval_loss = torch.stack(losses).mean()
        perplexity = torch.exp(eval_loss)
        wandb.log({"eval_loss": eval_loss.item(), "eval_perplexity": perplexity.item()})
    model.train()
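# Training loop: gradients are accumulated via acc.accumulate; loss, learning rate, and perplexity
# are logged every step, with periodic checkpoints and evaluations.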
model.train()
for epoch in range(epochs):
    for step, batch in enumerate(train_dataloader):
        with acc.accumulate(model):
            outputs = model(**batch)
            loss = outputs.loss
            acc.backward(loss)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
        if acc.is_main_process:
            perplexity = torch.exp(loss)
            wandb.log({"loss": loss.item(), "learning_rate": optimizer.param_groups[0]['lr'], "perplexity": perplexity.item()})
        global_step += 1
        if (step + 1) % 1000 == 0:
            save_checkpoint()
        if (step + 1) % 100 == 0 and acc.is_main_process:
            print(f"Loss: {loss.item()}")
        if (step + 1) % 2000 == 0:
            calc_metrics()
        if global_step > num_training_steps:
            break
    if global_step > num_training_steps:
        break
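# Close out the W&B run and save the final adapter checkpoint.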
if acc.is_main_process:
    wandb.finish()
save_checkpoint()