# finetuned-danube / cfg.yaml
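# H2O LLM Studio experiment configuration (inferred from the keys and the
# /home/llmstudio/... paths below): a LoRA fine-tune of h2oai/h2o-danube-1.8b-chat
# on CFA_options_tutorial.csv, with the backbone loaded in int4 precision
# (see `architecture` and `training`).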
architecture:
    backbone_dtype: int4
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
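# Note on `architecture` above: backbone_dtype int4 loads the base model with 4-bit
# weights and gradient_checkpointing trades extra compute for lower memory use;
# combined with the LoRA options under `training`, this is effectively a QLoRA-style setup.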
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
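# Note on `augmentation` above: the NEFTune noise alpha and all token/parent
# augmentation probabilities are 0.0, i.e. no data augmentation is applied during training.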
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: response
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: None
    personalize: false
    prompt_column:
    - instruction
    system_column: None
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    text_system_start: <|system|>
    train_dataframe: /home/llmstudio/mount/data/user/CFA_options_tutorial/CFA_options_tutorial.csv
    validation_dataframe: None
    validation_size: 0.2
    validation_strategy: automatic
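# Note on `dataset` above: each sample is built from the `instruction` and `response`
# CSV columns using the prompt/answer markers; with mask_prompt_labels true the loss is
# computed on answer tokens only, and 20% of the train dataframe is split off
# automatically for validation. Assuming the backbone's EOS token is </s>, an
# assembled sample would look roughly like:
#   <|prompt|>What is a call option?</s><|answer|>A call option is ...</s>
# (the question/answer text here is an illustration, not taken from the dataset).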
environment:
    compile_model: false
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: true
    number_of_workers: 8
    seed: 1234
    trust_remote_code: true
    use_deepspeed: false
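# Note on `environment` above: training runs on a single GPU ('0') with mixed
# precision; use_deepspeed is false, so the deepspeed_* bucket sizes have no effect here.
# trust_remote_code: true allows custom model code from the Hugging Face repo to run.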
experiment_name: finetuned_danube
llm_backbone: h2oai/h2o-danube-1.8b-chat
logging:
    logger: None
    neptune_project: ''
output_directory: /home/llmstudio/mount/output/user/finetuned_danube/
prediction:
    batch_size_inference: 0
    do_sample: false
    max_length_inference: 256
    metric: BLEU
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: general
    min_length_inference: 2
    num_beams: 1
    num_history: 4
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.0
    top_k: 0
    top_p: 1.0
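# Note on `prediction` above: with do_sample false, temperature 0.0 and num_beams 1,
# validation generation is effectively greedy decoding (with repetition_penalty 1.2);
# answers are scored with BLEU, so the metric_gpt_* fields likely have no effect here.
# batch_size_inference 0 presumably falls back to the training batch size.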
problem_type: text_causal_language_modeling
tokenizer:
    add_prefix_space: false
    add_prompt_answer_tokens: false
    max_length: 512
    max_length_answer: 256
    max_length_prompt: 256
    padding_quantile: 1.0
    use_fast: true
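# Note on `tokenizer` above: the token budget is at most 256 tokens for the prompt and
# 256 for the answer, within an overall max_length of 512 per sample.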
training:
    batch_size: 2
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 1
    evaluate_before_training: true
    evaluation_epochs: 1.0
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: ''
    loss_function: TokenAveragedCrossEntropy
    optimizer: AdamW
    save_best_checkpoint: false
    schedule: Cosine
    train_validation_data: false
    use_flash_attention_2: false
    warmup_epochs: 0.0
    weight_decay: 0.0
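# Note on `training` above: a single epoch of LoRA fine-tuning (r=4, alpha=16,
# dropout 0.05) with AdamW, a cosine schedule and no warmup; with batch_size 2,
# grad_accumulation 1 and one GPU, the effective batch size is 2. The empty
# lora_target_modules presumably falls back to H2O LLM Studio's default target layers.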