bf16: true
cutoff_len: 512
dataset: llamafactory/alpaca_en
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: true
eval_strategy: epoch
finetuning_type: full
formatting: alpaca
global_batch_size: 512
gradient_accumulation_steps: 8
gradient_checkpointing: true
hub_model_id: mlfoundations-dev/test_run_mini
learning_rate: 2.0e-05
logging_steps: 10
lr_scheduler_type: cosine
max_steps: 3
model_name_or_path: meta-llama/Llama-3.2-1B
neat_packing: true
output_dir: /data4/dcft/experiments/train/checkpoints/test_run_mini
overwrite_cache: true
overwrite_output_dir: true
packing: true
per_device_train_batch_size: 8
plot_loss: true
preprocessing_num_workers: 16
push_to_db: false
push_to_hub: true
report_to: wandb
run_name: test_run_mini
save_strategy: epoch
stage: sft
template: alpaca
val_size: 0.05
warmup_ratio: 0.1
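
# Note: assuming the launcher derives world size as
#   global_batch_size / (per_device_train_batch_size * gradient_accumulation_steps),
# this config implies an 8-GPU run: 512 / (8 * 8) = 8 devices.
#
# Usage sketch, assuming LLaMA-Factory's CLI is installed (the filename
# test_run_mini.yaml is hypothetical):
#   llamafactory-cli train test_run_mini.yaml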