bf16: true
cutoff_len: 1024
dataset: mathinstruct
dataset_dir: data
ddp_timeout: 180000000
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
include_num_input_tokens_seen: true
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 2
lora_dropout: 0
lora_rank: 1
lora_target: all
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: D:/models/Qwen2.5-0.5B
num_train_epochs: 1.0
optim: adamw_torch
output_dir: saves\Qwen2.5-0.5B\lora\train_2024-09-26-14-50-59
packing: false
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
quantization_bit: 4
quantization_method: bitsandbytes
report_to: all
save_steps: 100
stage: sft
template: qwen
warmup_steps: 0
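
# A minimal usage sketch, assuming this file lives in a LLaMA-Factory checkout
# and is saved under a hypothetical name such as qwen2.5_lora_sft.yaml:
#
#   llamafactory-cli train qwen2.5_lora_sft.yaml
#
# With quantization_bit: 4 and quantization_method: bitsandbytes, the base
# model is loaded in 4-bit (QLoRA-style) while the LoRA adapters
# (lora_rank: 1, lora_alpha: 2, applied to all linear layers via
# lora_target: all) are trained in higher precision. The effective batch
# size is per_device_train_batch_size * gradient_accumulation_steps = 16
# per device.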