Namespace(model_name_or_path='/media/nmitchko/NVME/text-generation-webui/models/codellama_CodeLlama-34b-hf', trust_remote_code=True, use_auth_token=False, eval_dataset_size=1024, max_train_samples=None, max_eval_samples=1000, source_max_len=16, target_max_len=512, dataset='i2b2.json', dataset_format='alpaca', output_dir='/media/ai/blk/loras/i2b2training', overwrite_output_dir=False, do_train=True, do_eval=True, do_predict=False, evaluation_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=2, per_device_eval_batch_size=2, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=16, eval_accumulation_steps=None, eval_delay=0, learning_rate=0.0001, weight_decay=0.0, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=0.3, num_train_epochs=3.0, max_steps=4500, lr_scheduler_type=<SchedulerType.CONSTANT: 'constant'>, warmup_ratio=0.03, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/media/ai/blk/loras/i2b2training/runs/Aug31_13-33-49_ai-server-1', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=False, logging_steps=100, logging_nan_inf_filter=True, save_strategy=<IntervalStrategy.STEPS: 'steps'>, save_steps=200, save_total_limit=40, save_safetensors=False, save_on_each_node=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=0, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=1000, dataloader_num_workers=2, past_index=-1, run_name='/media/ai/blk/loras/i2b2training', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model=None, greater_is_better=None, ignore_data_skip=False, sharded_ddp=[], fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, deepspeed=None, label_smoothing_factor=0.0, optim=<OptimizerNames.PAGED_ADAMW: 'paged_adamw_32bit'>, optim_args=None, adafactor=False, group_by_length=True, length_column_name='length', report_to=[], ddp_find_unused_parameters=False, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=False, hub_always_push=False, gradient_checkpointing=True, include_inputs_for_metrics=False, fp16_backend='auto', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=7200, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, cache_dir=None, train_on_source=False, mmlu_split='eval', mmlu_dataset='mmlu-fs', do_mmlu_eval=False, max_mmlu_samples=None, mmlu_source_max_len=2048, full_finetune=False, adam8bit=False, double_quant=True, quant_type='nf4', bits=4, lora_r=64, lora_alpha=16.0, lora_dropout=0.05, max_memory_MB=80000, distributed_state=Distributed environment: DistributedType.MULTI_GPU; Backend: nccl; Num processes: 2; Process index: 0; Local process index: 0; Device: cuda:0, _n_gpu=1, __cached__setup_devices=device(type='cuda', index=0), deepspeed_plugin=None, _frozen=True)
loading base model /media/nmitchko/NVME/text-generation-webui/models/codellama_CodeLlama-34b-hf...
[rank 1 prints an identical Namespace and the same loading, dataset, and checkpoint messages, differing only in local_rank=1, Process index: 1, Device: cuda:1]
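
For readers mapping the quantization and adapter flags in that dump (bits=4, quant_type='nf4', double_quant=True, bf16=True, lora_r=64, lora_alpha=16.0, lora_dropout=0.05) onto code, they correspond to the standard bitsandbytes/PEFT setup roughly as below. This is a minimal sketch assuming recent transformers, peft, and bitsandbytes APIs, not the exact qlora.py source; qlora.py additionally scans the model for every 4-bit linear layer and passes that list as target_modules, which this sketch leaves to PEFT's per-architecture defaults.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model_path = '/media/nmitchko/NVME/text-generation-webui/models/codellama_CodeLlama-34b-hf'

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # bits=4
    bnb_4bit_quant_type='nf4',              # quant_type='nf4'
    bnb_4bit_use_double_quant=True,         # double_quant=True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bf16=True
)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    device_map={'': 0},        # one full quantized replica per DDP rank
    trust_remote_code=True,
)
model = prepare_model_for_kbit_training(model)  # casts norms, enables input grads

lora_config = LoraConfig(
    r=64,                  # lora_r
    lora_alpha=16,         # lora_alpha
    lora_dropout=0.05,     # lora_dropout
    bias='none',
    task_type='CAUSAL_LM',
    # target_modules omitted: PEFT's Llama default (q_proj, v_proj) is narrower
    # than qlora.py, which adapts all linear layers.
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # source of the 'trainable params: ...' line below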
Adding special tokens.
adding LoRA modules...
loaded model
DatasetDict({
    train: Dataset({
        features: ['output', 'input'],
        num_rows: 6114
    })
    test: Dataset({
        features: ['output', 'input'],
        num_rows: 680
    })
})
Splitting train dataset in train and validation according to `eval_dataset_size`
trainable params: 217841664.0 || all params: 17570209792 || trainable: 1.2398353040678365
torch.bfloat16 959971328 0.05463630425386784
torch.uint8 16609443840 0.9453184701051519
torch.float32 794624 4.522564098021215e-05
{'loss': 0.1991, 'learning_rate': 0.0001, 'epoch': 1.26}
{'loss': 0.076, 'learning_rate': 0.0001, 'epoch': 2.51}
Saving PEFT checkpoint...
{'loss': 0.061, 'learning_rate': 0.0001, 'epoch': 3.77}
{'loss': 0.0522, 'learning_rate': 0.0001, 'epoch': 5.03}
Saving PEFT checkpoint...
{'loss': 0.0471, 'learning_rate': 0.0001, 'epoch': 6.28}
{'loss': 0.044, 'learning_rate': 0.0001, 'epoch': 7.54}
Saving PEFT checkpoint...
{'loss': 0.0411, 'learning_rate': 0.0001, 'epoch': 8.8}
{'loss': 0.0383, 'learning_rate': 0.0001, 'epoch': 10.05}
Saving PEFT checkpoint...
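
The epoch column in those loss lines is consistent with the batch configuration in the dump: 2 sequences per device × 2 processes × 16 gradient-accumulation steps = 64 sequences per optimizer step, and after the 1024-example validation split the train split holds 6114 − 1024 = 5090 rows. Note also that max_steps=4500 overrides num_train_epochs=3.0 in the HF Trainer, which is why the counter runs past epoch 10. A quick sanity check:

# Check the epoch column against the batch configuration above.
per_device_bs = 2            # per_device_train_batch_size
num_procs     = 2            # Num processes: 2 (one per GPU)
grad_accum    = 16           # gradient_accumulation_steps
train_rows    = 6114 - 1024  # train split minus eval_dataset_size

effective_bs    = per_device_bs * num_procs * grad_accum  # 64 sequences per step
steps_per_epoch = train_rows / effective_bs               # ~79.5
print(round(100 / steps_per_epoch, 2))                    # ~1.26 epochs per logging_steps=100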
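
The dtype breakdown above is also worth decoding: the torch.uint8 entry (~16.6B elements, 94.5%) is the NF4 base weights packed two per byte, while the trainable ~218M LoRA parameters live among the bf16 tensors. Each "Saving PEFT checkpoint..." therefore writes only the small adapter, not the 34B base model. A minimal sketch for reloading one of those checkpoints later, assuming qlora's usual checkpoint-<step>/adapter_model layout under output_dir (the step number 400 is illustrative):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# Reload the 4-bit base model, then attach the trained LoRA adapter.
base = AutoModelForCausalLM.from_pretrained(
    '/media/nmitchko/NVME/text-generation-webui/models/codellama_CodeLlama-34b-hf',
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type='nf4',
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map='auto',
)
model = PeftModel.from_pretrained(
    base, '/media/ai/blk/loras/i2b2training/checkpoint-400/adapter_model'
)
model.eval()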