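# H2O LLM Studio experiment configuration (cfg.yaml) for the run "important-monkey-siamese-v2":
# text_causal_classification_modeling fine-tuning of microsoft/Phi-3-mini-4k-instruct with LoRA
# on the LMSYS siamese dataframes referenced under `dataset`.
# The backbone is loaded in bfloat16 with gradient checkpointing enabled; `siamese: true` does not
# appear to be a stock H2O LLM Studio key and is assumed here to be read by a custom model patch.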
architecture:
    backbone_dtype: bfloat16
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
    siamese: true
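# Augmentation is effectively disabled: NEFTune noise alpha and the token/parent
# masking probabilities are all 0.0.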
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
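# Three-class classification: targets come from the `label` column, inputs from the `text`
# column. A custom validation split (val_siamese_v1.pq) is used; with validation_strategy
# set to custom, the validation_size fraction is presumably not applied.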
dataset:
    add_eos_token_to_answer: false
    add_eos_token_to_prompt: false
    add_eos_token_to_system: false
    answer_column: label
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice: []
    limit_chained_samples: false
    mask_prompt_labels: true
    num_classes: 3
    parent_id_column: None
    personalize: false
    prompt_column:
    - text
    system_column: None
    text_answer_separator: ''
    text_prompt_start: ''
    text_system_start: ''
    train_dataframe: /home/philipp/h2o-llmstudio/data/user/lmsys/train_siamese_v1.pq
    validation_dataframe: /home/philipp/h2o-llmstudio/data/user/lmsys/val_siamese_v1.pq
    validation_size: 0.1
    validation_strategy: custom
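# Training runs on GPUs 0-2 with bfloat16 mixed precision. The DeepSpeed parameters are
# listed but inactive, since use_deepspeed is false.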
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    - '2'
    huggingface_branch: main
    mixed_precision: true
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: -1
    trust_remote_code: true
    use_deepspeed: false
experiment_name: important-monkey-siamese-v2
llm_backbone: microsoft/Phi-3-mini-4k-instruct
logging:
    logger: Neptune
    neptune_project: Zoo/kaggle-lmsys
output_directory: /home/philipp/h2o-llmstudio/output/user/important-monkey-siamese-v2/
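# Validation metric is LogLoss; batch_size_inference of 0 presumably falls back to the
# training batch size.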
prediction:
    batch_size_inference: 0
    metric: LogLoss
problem_type: text_causal_classification_modeling
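# Prompt, answer, and combined sequence lengths are all capped at 4096 tokens, matching
# the 4k context window of Phi-3-mini-4k-instruct.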
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 4096
    max_length_answer: 4096
    max_length_prompt: 4096
    padding_quantile: 1.0
    use_fast: true
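# LoRA fine-tuning (r=16, alpha=32, dropout 0.0) at learning rate 1e-4 for one epoch with a
# cosine schedule and flash attention 2; the classification_head is trained with a lower
# differential learning rate of 1e-5. Evaluation runs every 0.25 epochs and only the last
# checkpoint is saved.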
training:
    batch_size: 2
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers:
    - classification_head
    drop_last_batch: true
    epochs: 1
    evaluate_before_training: false
    evaluation_epochs: 0.25
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 32
    lora_dropout: 0.0
    lora_r: 16
    lora_target_modules: ''
    loss_function: CrossEntropyLoss
    optimizer: AdamW
    save_checkpoint: last
    schedule: Cosine
    train_validation_data: false
    use_flash_attention_2: true
    warmup_epochs: 0.0
    weight_decay: 0.0