# spacy-mamba / cfg.yaml
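# H2O LLM Studio experiment configuration: causal-LM classification
# (text_causal_classification_modeling) predicting a 6-class `score` label
# from the `full_text` column plus spaCy-derived features.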
architecture:
    backbone_dtype: float32
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
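# Training-time augmentation; token_mask_probability randomly masks 10% of input tokens.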
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.1
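# Dataset configuration: target column, prompt columns, special tokens, validation split.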
dataset:
    add_eos_token_to_answer: false
    add_eos_token_to_prompt: false
    add_eos_token_to_system: false
    add_prompt_answer_tokens: false
    answer_column: score
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    num_classes: 6
    parent_id_column: None
    personalize: false
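    # Prompt columns: raw essay text followed by engineered spaCy features.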
    prompt_column:
    - full_text
    - count_sentence
    - count_word
    - count_paragraph
    - count_symbol
    - count_punctuation
    - count_stop_words
    - count_ner
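    # Named-entity counts (spaCy OntoNotes entity types):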
    - CARDINAL
    - DATE
    - EVENT
    - FAC
    - GPE
    - LANGUAGE
    - LAW
    - LOC
    - MONEY
    - NORP
    - ORDINAL
    - ORG
    - PERCENT
    - PERSON
    - PRODUCT
    - QUANTITY
    - TIME
    - WORK_OF_ART
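    # Aggregate entity statistics and discourse/style features: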
    - mean_ner_sentence
    - len_ner
    - mean_ner
    - proc_ner_per_text
    - max_ner_per_sentence
    - independent_clauses
    - dependent_clauses
    - formality_level
    - simple_sentence_count
    - complex_sentence_count
    - has_intro
    - count_intro
    - has_repetitions
    - count_repetitions
    - coherence
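    # Fine-grained POS-tag counts (Penn Treebank tagset as exposed by spaCy's token.tag_):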
    - -LRB-
    - -RRB-
    - ADD
    - AFX
    - CC
    - CD
    - DT
    - EX
    - FW
    - HYPH
    - IN
    - JJ
    - JJR
    - JJS
    - LS
    - MD
    - NFP
    - NN
    - NNP
    - NNPS
    - NNS
    - PDT
    - POS
    - PRP
    - PRP$
    - RB
    - RBR
    - RBS
    - RP
    - SYM
    - TO
    - UH
    - VB
    - VBD
    - VBG
    - VBN
    - VBP
    - VBZ
    - WDT
    - WP
    - WP$
    - WRB
    - XX
    - _SP
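    # Dependency-relation counts (spaCy English dependency labels, token.dep_):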
    - ROOT
    - acl
    - acomp
    - advcl
    - advmod
    - agent
    - amod
    - appos
    - attr
    - aux
    - auxpass
    - case
    - cc
    - ccomp
    - compound
    - conj
    - csubj
    - csubjpass
    - dative
    - dep
    - det
    - dobj
    - expl
    - intj
    - mark
    - meta
    - neg
    - nmod
    - npadvmod
    - nsubj
    - nsubjpass
    - nummod
    - oprd
    - parataxis
    - pcomp
    - pobj
    - poss
    - preconj
    - predet
    - prep
    - prt
    - punct
    - quantmod
    - relcl
    - xcomp
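    # Prompt/system formatting and data sources: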
    system_column: None
    text_answer_separator: ''
    text_prompt_start: ''
    text_system_start: ''
    train_dataframe: /root/h2o-llmstudio/data/user/train_spacy_mamba/train_spacy_mamba.csv
    validation_dataframe: None
    validation_size: 0.01
    validation_strategy: automatic
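# Runtime environment: single GPU, bfloat16 mixed precision; DeepSpeed configured but disabled.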
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: true
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: -1
    trust_remote_code: true
    use_deepspeed: false
experiment_name: spacy-mamba
llm_backbone: h2oai/h2ogpt-4096-llama2-7b
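# Experiment tracking via Neptune.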
logging:
    logger: Neptune
    neptune_project: samvelkoch/essay
output_directory: /root/h2o-llmstudio/output/user/spacy-mamba/
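# Evaluation metric and inference batch size (in LLM Studio, 0 falls back to the training batch size).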
prediction:
    batch_size_inference: 0
    metric: Accuracy
problem_type: text_causal_classification_modeling
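# Tokenizer settings; the long max_length (10240) leaves room for the full essay plus feature fields.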
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 10240
    padding_quantile: 1.0
    tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
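# Training: LoRA fine-tuning (r=4, alpha=16) with a differential learning rate for the
# classification head; an empty lora_target_modules applies LLM Studio's defaults for the backbone.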
training:
    batch_size: 2
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers:
    - classification_head
    drop_last_batch: true
    epochs: 1
    evaluate_before_training: false
    evaluation_epochs: 1.0
    freeze_layers: []
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: ''
    lora_unfreeze_layers: []
    loss_function: CrossEntropyLoss
    optimizer: AdamW
    save_checkpoint: last
    schedule: Cosine
    train_validation_data: false
    use_dora: false
    use_flash_attention_2: true
    warmup_epochs: 0.0
    weight_decay: 0.0