import os
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
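
# Hugging Face stack: datasets for data loading, transformers for the base model and
# quantization config, peft for the LoRA adapter, and trl for supervised fine-tuning.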
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
    LlamaTokenizerFast
)
from peft import LoraConfig, PeftModel, get_peft_model
from trl import SFTTrainer
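
# Base model and fine-tuning hyperparameters (QLoRA: 4-bit base model + LoRA adapters).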
model_name = "mistral-hermes-2.5"

torch.cuda.empty_cache()

# Output locations
new_model_name = "mistral-mfs-reference-2"
output_dir = "./mistral-mfs-reference-2"
tb_log_dir = "./mistral-mfs-reference-2/logs"

# Training schedule and optimization
max_steps = 2000
per_device_train_batch_size = 4
learning_rate = 2e-5
max_seq_length = 4096
save_steps = 1000
lr_scheduler_type = "linear"
local_rank = -1
per_device_eval_batch_size = 1
gradient_accumulation_steps = 4
max_grad_norm = 0.3
weight_decay = 0.001

# LoRA hyperparameters
lora_alpha = 16
lora_dropout = 0.1
lora_r = 64

group_by_length = True

# 4-bit quantization (bitsandbytes)
use_4bit = True
use_nested_quant = False
bnb_4bit_compute_dtype = "float16"
bnb_4bit_quant_type = "nf4"

# Precision, memory and optimizer settings
num_train_epochs = 1
fp16 = True
bf16 = False
packing = False
gradient_checkpointing = True
optim = "paged_adamw_32bit"
warmup_ratio = 0.03
logging_steps = 1

device_map = {"": 0}
report_to = "tensorboard"
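
# LoRA configuration: rank-64 adapters applied to the attention query and value projections.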
peft_config = LoraConfig(
    lora_alpha=lora_alpha,
    lora_dropout=lora_dropout,
    r=lora_r,
    inference_mode=False,
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"]
)
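
# Tokenizer: the base tokenizer has no dedicated padding token, so the EOS token is reused for padding.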
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
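
# Dataset formatting: each sample's "conversation" field becomes the training text, terminated by EOS.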
def format_alpaca(sample):
    prompt = f"{sample['conversation']}"
    return prompt


def template_dataset(sample):
    sample["text"] = f"{format_alpaca(sample)}{tokenizer.eos_token}"
    return sample
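
# Load the training corpus from a local JSON file and apply the text template.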
data_files = {"train": "corpus_guillaume_tell_2.json"}
dataset = load_dataset("json", data_files=data_files, split="train")

# Shuffle so training does not follow the file order
dataset = dataset.shuffle(seed=42)

dataset = dataset.map(template_dataset, remove_columns=list(dataset.features))

print(dataset[40])
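
# 4-bit (QLoRA) quantization config used when loading the base model with bitsandbytes.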
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)

bnb_config = BitsAndBytesConfig(
    load_in_4bit=use_4bit,
    bnb_4bit_quant_type=bnb_4bit_quant_type,
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=use_nested_quant,
)

if compute_dtype == torch.float16 and use_4bit:
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("=" * 80)
        print("Your GPU supports bfloat16: you can accelerate training by setting bf16 = True")
        print("=" * 80)
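
# Load the base model with 4-bit weights on GPU 0 and disable the KV cache for training.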
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=device_map,
    quantization_config=bnb_config
)

model.config.use_cache = False
model.config.pretraining_tp = 1

torch.cuda.empty_cache()
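
# Training arguments passed to the SFTTrainer below.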
training_arguments = TrainingArguments(
    output_dir=output_dir,
    logging_dir=tb_log_dir,
    per_device_train_batch_size=per_device_train_batch_size,
    gradient_accumulation_steps=gradient_accumulation_steps,
    gradient_checkpointing=gradient_checkpointing,
    optim=optim,
    save_steps=save_steps,
    logging_steps=logging_steps,
    learning_rate=learning_rate,
    fp16=fp16,
    bf16=bf16,
    max_grad_norm=max_grad_norm,
    max_steps=max_steps,
    warmup_ratio=warmup_ratio,
    group_by_length=group_by_length,
    lr_scheduler_type=lr_scheduler_type,
    report_to=report_to
)
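
# Supervised fine-tuning with TRL's SFTTrainer; the LoRA adapter is created from peft_config.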
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    args=training_arguments,
    packing=packing
)

trainer.train()
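
# Save the trained LoRA adapter weights.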
model_to_save = trainer.model.module if hasattr(trainer.model, 'module') else trainer.model
model_to_save.save_pretrained(new_model_name)

torch.cuda.empty_cache()
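
# Merge the LoRA adapter back into the base model and save the standalone merged weights.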
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(new_model_name, device_map="auto", torch_dtype=torch.bfloat16)
model = model.merge_and_unload()

output_merged_dir = os.path.join(new_model_name, new_model_name)
model.save_pretrained(output_merged_dir, safe_serialization=True)

tokenizer.save_pretrained(output_merged_dir)
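
# Optional sanity check (not part of the original run): a minimal sketch that reuses the
# already-imported `pipeline` helper on the merged model held in memory and generates a
# short completion. The prompt below is a placeholder, not taken from the training corpus.
text_gen = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
print(text_gen("Write one sentence about supervised fine-tuning.", max_new_tokens=64)[0]["generated_text"])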