llama2-jammuGPT / training_params.json
{
"model": "abhishek/llama-2-7b-hf-small-shards",
"project_name": "llama2-jammuGPT",
"data_path": "llama2-jammuGPT/autotrain-data",
"train_split": "train",
"valid_split": null,
"add_eos_token": false,
"block_size": -1,
"model_max_length": 1024,
"padding": null,
"trainer": "sft",
"use_flash_attention_2": false,
"log": "none",
"disable_gradient_checkpointing": false,
"logging_steps": -1,
"evaluation_strategy": "epoch",
"save_total_limit": 1,
"save_strategy": "epoch",
"auto_find_batch_size": false,
"mixed_precision": null,
"lr": 0.0002,
"epochs": 2,
"batch_size": 2,
"warmup_ratio": 0.5,
"gradient_accumulation": 1,
"optimizer": "adamw_torch",
"scheduler": "linear",
"weight_decay": 0.0,
"max_grad_norm": 1.0,
"seed": 42,
"chat_template": null,
"quantization": "int4",
"target_modules": null,
"merge_adapter": false,
"peft": true,
"lora_r": 16,
"lora_alpha": 32,
"lora_dropout": 0.05,
"model_ref": null,
"dpo_beta": 0.1,
"prompt_text_column": "autotrain_prompt",
"text_column": "autotrain_text",
"rejected_text_column": "autotrain_rejected_text",
"push_to_hub": true,
"repo_id": "ishavverma/llama2-jammuGPT",
"username": null
}
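
For anyone reproducing this setup outside AutoTrain, the fields above map onto the usual transformers, peft, and bitsandbytes APIs. Below is a minimal sketch under those assumptions; the target_modules list is hypothetical (the config leaves it null, so AutoTrain falls back to its own defaults), and this illustrates the parameter mapping rather than the actual AutoTrain code path.

# Sketch: the key settings from training_params.json expressed with
# transformers + peft + bitsandbytes. Illustration only, not AutoTrain internals.
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import LoraConfig, get_peft_model

model_id = "abhishek/llama-2-7b-hf-small-shards"  # "model"

# "quantization": "int4" -> 4-bit loading via bitsandbytes
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.model_max_length = 1024  # "model_max_length"

model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=bnb_config, device_map="auto"
)

# "peft": true, with the LoRA hyperparameters from the config.
# target_modules is an ASSUMPTION (common Llama-2 attention projections);
# the config has "target_modules": null, so AutoTrain picks its own defaults.
lora_config = LoraConfig(
    r=16,                                 # "lora_r"
    lora_alpha=32,                        # "lora_alpha"
    lora_dropout=0.05,                    # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # assumption, see note above
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)

# Optimizer and schedule fields from the config.
args = TrainingArguments(
    output_dir="llama2-jammuGPT",       # "project_name"
    learning_rate=2e-4,                 # "lr"
    num_train_epochs=2,                 # "epochs"
    per_device_train_batch_size=2,      # "batch_size"
    gradient_accumulation_steps=1,      # "gradient_accumulation"
    warmup_ratio=0.5,                   # "warmup_ratio"
    optim="adamw_torch",                # "optimizer"
    lr_scheduler_type="linear",         # "scheduler"
    weight_decay=0.0,                   # "weight_decay"
    max_grad_norm=1.0,                  # "max_grad_norm"
    seed=42,                            # "seed"
    save_strategy="epoch",              # "save_strategy"
    save_total_limit=1,                 # "save_total_limit"
)

Since "trainer" is "sft", an actual run would pass the model and these arguments to trl's SFTTrainer. One design note: warmup_ratio of 0.5 means half of the two-epoch schedule is spent warming up, which is unusually aggressive for a linear-decay run of this length.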