---
license: apache-2.0
datasets:
- adamo1139/rawrr_v2-2_stage1
---

## Model description

This is the base Yi-34B-200K XLCTX model trained with DPO on the adamo1139/rawrr_v2-2_stage1 dataset, with the goal of making outputs read like completions rather than answers to a question. DPO was done in ChatML format, with no prior SFT step. If I were doing it now I would use ORPO instead of DPO here to make the effect stronger, but it's too late for that. It may slightly decensor a model, but I don't think this idea works all that well with DPO before an SFT step, as was widely known; I did it anyway.
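For reference, a minimal sketch of the ChatML layout the model was trained to complete (the tags match the chat template used in the training script below; the prompt text itself is illustrative only):

```python
# Illustrative only: the shape of a ChatML prompt this model expects.
prompt = (
    "<|im_start|>user\n"
    "Write the opening of a short story about a lighthouse.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```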
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" alt="made with Unsloth" width="400" height="64"/>](https://github.com/unslothai/unsloth)

## Training script for Unsloth

```python
from unsloth import FastLanguageModel
from datasets import load_dataset
import torch

max_seq_length = 4096 # Choose any! We auto support RoPE Scaling internally!
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "adamo1139/Yi-34B-200K-XLCTX", # Choose ANY! eg mistralai/Mistral-7B-Instruct-v0.2
    max_seq_length = max_seq_length,
    attn_implementation = "flash_attention_2",
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
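# At this point the base model is loaded in 4-bit (QLoRA-style); the tokenizer's
# chat template is replaced with ChatML just below.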

# Set the ChatML template used to format the DPO pairs below.
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
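# Optional sanity check (my addition, not part of the original script): render the
# template on a dummy turn to confirm the ChatML layout before training.
# print(tokenizer.apply_chat_template([{"role": "user", "content": "hi"}],
#                                     tokenize=False, add_generation_prompt=True))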

EOS_TOKEN = tokenizer.eos_token

def chatml_format(example):
    # Format system message, if the record has one
    if len(example['system']) > 0:
        message = {"role": "system", "content": example['system']}
        system = tokenizer.apply_chat_template([message], tokenize=False)
    else:
        system = ""

    # Format instruction, ending with the assistant header so that the
    # chosen/rejected strings are pure continuations
    message = {"role": "user", "content": example['prompt']}
    prompt = tokenizer.apply_chat_template([message], tokenize=False, add_generation_prompt=True)

    # Format chosen answer
    chosen = example['chosen'] + "<|im_end|>\n" + EOS_TOKEN

    # Format rejected answer
    rejected = example['rejected'] + "<|im_end|>\n" + EOS_TOKEN

    return {
        "prompt": system + prompt,
        "chosen": chosen,
        "rejected": rejected,
    }
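# For reference (my annotation, not in the original script), a mapped record with
# a system message ends up as:
#   prompt:   "<|im_start|>system\n...<|im_end|>\n<|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n"
#   chosen:   "...<|im_end|>\n" + EOS_TOKEN
#   rejected: "...<|im_end|>\n" + EOS_TOKEN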

# Load dataset
dataset = load_dataset("adamo1139/rawrr_v2-2_stage1", split="train")
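# Note (my annotation): chatml_format above expects 'system', 'prompt', 'chosen'
# and 'rejected' columns in each record, which this dataset provides.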

import pprint

# Peek at a few raw records before formatting
pprint.pprint("NOT a formatted dataset")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

# Save columns
original_columns = dataset.column_names

# Format dataset
dataset = dataset.map(
    chatml_format,
    remove_columns=original_columns
)

# Print the same samples after formatting
pprint.pprint("formatted dataset")
pprint.pprint(dataset[250])
pprint.pprint(dataset[260])
pprint.pprint(dataset[270])
pprint.pprint(dataset[280])
pprint.pprint(dataset[290])

model = FastLanguageModel.get_peft_model(
    model,
    r = 32, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
    target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
                      "gate_proj", "up_proj", "down_proj",],
    lora_alpha = 32,
    lora_dropout = 0, # Currently only supports dropout = 0
    bias = "none",    # Currently only supports bias = "none"
    use_gradient_checkpointing = "unsloth",
    random_state = 3407,
    use_rslora = False,  # We support rank stabilized LoRA
    loftq_config = None, # And LoftQ
)
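# Optional (my addition): Unsloth returns a PEFT-wrapped model, so you can check
# how many parameters the LoRA adapters actually train.
# model.print_trainable_parameters()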

from transformers import TrainingArguments
from trl import DPOTrainer

dpo_trainer = DPOTrainer(
    model = model,
    ref_model = None,
    args = TrainingArguments(
        per_device_train_batch_size = 1,
        gradient_accumulation_steps = 16,
        warmup_ratio = 0.03,
        num_train_epochs = 1,
        learning_rate = 0.0001,
        fp16 = not torch.cuda.is_bf16_supported(),
        bf16 = torch.cuda.is_bf16_supported(),
        logging_steps = 1,
        optim = "adamw_8bit",
        weight_decay = 0.0,
        lr_scheduler_type = "cosine",
        seed = 42,
        save_strategy = "steps",
        save_steps = 100,
        save_total_limit = 20,
        output_dir = "1904-yi-200k-xlctx-raw-intermediate",
    ),
    beta = 0.1,
    train_dataset = dataset,
    # eval_dataset = raw_datasets["test"],
    tokenizer = tokenizer,
    max_length = 650,
    max_prompt_length = 650,
)
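# Note (my annotation): with per_device_train_batch_size = 1 and
# gradient_accumulation_steps = 16, the effective batch size is 16 pairs per step.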

dpo_trainer.train()
model.save_pretrained("1904-yi-200k-xlctx-raw-final") # Local saving
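# Sketch (my addition, not in the original script): save the tokenizer alongside
# the adapter, and optionally merge the LoRA weights into the base model using
# Unsloth's helper, if your Unsloth version provides it.
# tokenizer.save_pretrained("1904-yi-200k-xlctx-raw-final")
# model.save_pretrained_merged("1904-yi-200k-xlctx-raw-merged", tokenizer,
#                              save_method = "merged_16bit")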
```
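## Loading the trained adapter (sketch)

A minimal, hedged sketch of loading the saved LoRA adapter back into Unsloth for inference. It assumes the local directory produced by `save_pretrained` above and a CUDA device; the prompt text is illustrative only.

```python
from unsloth import FastLanguageModel

# Load the locally saved LoRA adapter on top of the base model
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "1904-yi-200k-xlctx-raw-final",  # local adapter dir from training
    max_seq_length = 4096,
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path

# ChatML prompt matching the template used during DPO
prompt = (
    "<|im_start|>user\n"
    "Continue: The rain had not stopped for three days.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```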