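# Axolotl config: extend winglian/Llama-3-8b-64k-PoSE toward a 262k-token
# context window with PoSE plus a high-rank rsLoRA adapter on SlimPajama data.
# Typical launch (filename illustrative):
#   accelerate launch -m axolotl.cli.train llama-3-262k-pose.yml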
# base_model: meta-llama/Meta-Llama-3-8B
base_model: winglian/Llama-3-8b-64k-PoSE
model_type: LlamaForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
load_in_4bit: false
strict: false
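# Pretraining-style data: the first 80 example shards of SlimPajama-627B
# chunk1, consumed as raw completion text (no prompt template).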
datasets:
  - path: cerebras/SlimPajama-627B
    data_files:
      - train/chunk1/example_train_0.jsonl.zst
      - train/chunk1/example_train_1.jsonl.zst
      - train/chunk1/example_train_2.jsonl.zst
      - train/chunk1/example_train_3.jsonl.zst
      - train/chunk1/example_train_4.jsonl.zst
      - train/chunk1/example_train_5.jsonl.zst
      - train/chunk1/example_train_6.jsonl.zst
      - train/chunk1/example_train_7.jsonl.zst
      - train/chunk1/example_train_8.jsonl.zst
      - train/chunk1/example_train_9.jsonl.zst
      - train/chunk1/example_train_10.jsonl.zst
      - train/chunk1/example_train_11.jsonl.zst
      - train/chunk1/example_train_12.jsonl.zst
      - train/chunk1/example_train_13.jsonl.zst
      - train/chunk1/example_train_14.jsonl.zst
      - train/chunk1/example_train_15.jsonl.zst
      - train/chunk1/example_train_16.jsonl.zst
      - train/chunk1/example_train_17.jsonl.zst
      - train/chunk1/example_train_18.jsonl.zst
      - train/chunk1/example_train_19.jsonl.zst
      - train/chunk1/example_train_20.jsonl.zst
      - train/chunk1/example_train_21.jsonl.zst
      - train/chunk1/example_train_22.jsonl.zst
      - train/chunk1/example_train_23.jsonl.zst
      - train/chunk1/example_train_24.jsonl.zst
      - train/chunk1/example_train_25.jsonl.zst
      - train/chunk1/example_train_26.jsonl.zst
      - train/chunk1/example_train_27.jsonl.zst
      - train/chunk1/example_train_28.jsonl.zst
      - train/chunk1/example_train_29.jsonl.zst
      - train/chunk1/example_train_30.jsonl.zst
      - train/chunk1/example_train_31.jsonl.zst
      - train/chunk1/example_train_32.jsonl.zst
      - train/chunk1/example_train_33.jsonl.zst
      - train/chunk1/example_train_34.jsonl.zst
      - train/chunk1/example_train_35.jsonl.zst
      - train/chunk1/example_train_36.jsonl.zst
      - train/chunk1/example_train_37.jsonl.zst
      - train/chunk1/example_train_38.jsonl.zst
      - train/chunk1/example_train_39.jsonl.zst
      - train/chunk1/example_train_40.jsonl.zst
      - train/chunk1/example_train_41.jsonl.zst
      - train/chunk1/example_train_42.jsonl.zst
      - train/chunk1/example_train_43.jsonl.zst
      - train/chunk1/example_train_44.jsonl.zst
      - train/chunk1/example_train_45.jsonl.zst
      - train/chunk1/example_train_46.jsonl.zst
      - train/chunk1/example_train_47.jsonl.zst
      - train/chunk1/example_train_48.jsonl.zst
      - train/chunk1/example_train_49.jsonl.zst
      - train/chunk1/example_train_50.jsonl.zst
      - train/chunk1/example_train_51.jsonl.zst
      - train/chunk1/example_train_52.jsonl.zst
      - train/chunk1/example_train_53.jsonl.zst
      - train/chunk1/example_train_54.jsonl.zst
      - train/chunk1/example_train_55.jsonl.zst
      - train/chunk1/example_train_56.jsonl.zst
      - train/chunk1/example_train_57.jsonl.zst
      - train/chunk1/example_train_58.jsonl.zst
      - train/chunk1/example_train_59.jsonl.zst
      - train/chunk1/example_train_60.jsonl.zst
      - train/chunk1/example_train_61.jsonl.zst
      - train/chunk1/example_train_62.jsonl.zst
      - train/chunk1/example_train_63.jsonl.zst
      - train/chunk1/example_train_64.jsonl.zst
      - train/chunk1/example_train_65.jsonl.zst
      - train/chunk1/example_train_66.jsonl.zst
      - train/chunk1/example_train_67.jsonl.zst
      - train/chunk1/example_train_68.jsonl.zst
      - train/chunk1/example_train_69.jsonl.zst
      - train/chunk1/example_train_70.jsonl.zst
      - train/chunk1/example_train_71.jsonl.zst
      - train/chunk1/example_train_72.jsonl.zst
      - train/chunk1/example_train_73.jsonl.zst
      - train/chunk1/example_train_74.jsonl.zst
      - train/chunk1/example_train_75.jsonl.zst
      - train/chunk1/example_train_76.jsonl.zst
      - train/chunk1/example_train_77.jsonl.zst
      - train/chunk1/example_train_78.jsonl.zst
      - train/chunk1/example_train_79.jsonl.zst
    type: completion
    split: train
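# Cache the tokenized dataset between runs; hold out 0.1% of it for eval.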
dataset_prepared_path: last_run_prepared
val_set_size: 0.001
output_dir: ./llama-3-32k
save_safetensors: true
sequence_len: 8192
sample_packing: false
pad_to_sequence_len: false
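# PoSE (Positional Skip-wisE training): train at sequence_len (8192) while
# simulating the full pose_max_context_len (262144) by splitting each sample
# into pose_num_chunks and offsetting each chunk's position ids by a random
# skip. Illustrative: with 16 chunks of 512 tokens, positions might run
# [0..511], [20480..20991], ... rather than the contiguous [0..8191].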
use_pose: true
pose_max_context_len: 262144
min_sample_len: 6144
pose_num_chunks: 16
curriculum_sampling: true
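# Write the target context window (and Llama-3's 500k RoPE base) into the
# saved model config so downstream loading sees 262k positions.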
overrides_of_model_config:
  rope_theta: 500000.0
  max_position_embeddings: 262144
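# High-rank LoRA on all attention projections. rsLoRA scales the update by
# lora_alpha / sqrt(lora_r) instead of lora_alpha / lora_r, which keeps the
# effective scale reasonable at r=1024.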
# peft_use_dora: true
adapter: lora
peft_use_rslora: true
lora_model_dir:
lora_r: 1024
lora_alpha: 1024
lora_dropout: 0.1
lora_target_modules:
  - q_proj
  - k_proj
  - v_proj
  - o_proj
wandb_project: llama-3-262k
wandb_entity: oaaic
wandb_watch:
wandb_name:
wandb_log_model:
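# Per-GPU effective batch = micro_batch_size * gradient_accumulation_steps = 8
# sequences per optimizer step.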
gradient_accumulation_steps: 8
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.00001
max_grad_norm: 1.0
adam_beta2: 0.95
train_on_inputs: false
group_by_length: false
bf16: true
fp16:
tf32: true
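# Recompute activations on the backward pass; this helps fit 8k-token samples
# plus an r=1024 adapter in memory.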
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
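# FlashAttention avoids materializing the full attention matrix, keeping
# memory roughly linear in sequence length; the other backends stay unset.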
xformers_attention:
flash_attention: true
sdp_attention:
s2_attention:
warmup_steps: 10
evals_per_epoch: 8
saves_per_epoch: 8
debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
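# Llama-3's tokenizer defines no pad token; reuse <|end_of_text|> for padding.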
special_tokens:
  pad_token: <|end_of_text|>