# lightning.pytorch==2.4.0
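# Fine-tuning config for binary acceptor splice-site classification with the
# rnafm RNA language-model backbone (LoRA) via genbio-ai ModelGenerator.
# Typically launched through the ModelGenerator CLI, e.g. `mgen fit --config <this file>`
# (entry-point name assumed; adjust to your ModelGenerator installation).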
seed_everything: 42
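# Trainer: DDP across all available devices, fp32 precision, 10 epochs,
# gradient clipping at 1.0, and W&B logging under project `rna_tasks`.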
trainer:
  accelerator: auto
  strategy:
    class_path: lightning.pytorch.strategies.DDPStrategy
    init_args:
      accelerator: null
      parallel_devices: null
      cluster_environment: null
      checkpoint_io: null
      precision_plugin: null
      ddp_comm_state: null
      ddp_comm_hook: null
      ddp_comm_wrapper: null
      model_averaging_period: null
      process_group_backend: null
      timeout: 0:30:00
      start_method: popen
      output_device: null
      dim: 0
      broadcast_buffers: true
      process_group: null
      bucket_cap_mb: 25
      find_unused_parameters: false
      check_reduction: false
      gradient_as_bucket_view: false
      static_graph: false
      delay_all_reduce_named_params: null
      param_to_hook_all_reduce: null
      mixed_precision: null
      device_mesh: null
  devices: auto
  num_nodes: 1
  precision: 32
  logger:
    class_path: lightning.pytorch.loggers.WandbLogger
    init_args:
      name: csp_acceptor_rnafm_1.6B
      save_dir: modelgenerator/logs
      version: null
      offline: false
      dir: null
      id: null
      anonymous: null
      project: rna_tasks
      log_model: false
      experiment: null
      prefix: ''
      checkpoint_name: null
      job_type: null
      config: null
      entity: null
      reinit: null
      tags: null
      group: null
      notes: null
      magic: null
      config_exclude_keys: null
      config_include_keys: null
      mode: null
      allow_val_change: null
      resume: null
      force: null
      tensorboard: null
      sync_tensorboard: null
      monitor_gym: null
      save_code: true
      settings: null
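  # Callbacks: log the learning rate every step and keep only the best
  # checkpoint by validation F1 (higher is better).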
  callbacks:
  - class_path: lightning.pytorch.callbacks.LearningRateMonitor
    init_args:
      logging_interval: step
      log_momentum: false
      log_weight_decay: false
  - class_path: lightning.pytorch.callbacks.ModelCheckpoint
    init_args:
      dirpath: modelgenerator/logs/rna_tasks/csp_acceptor_rnafm_1.6B
      filename: best_val:{epoch}-{val_f1:.3f}
      monitor: val_f1
      verbose: false
      save_last: null
      save_top_k: 1
      save_weights_only: false
      mode: max
      auto_insert_metric_name: true
      every_n_train_steps: null
      train_time_interval: null
      every_n_epochs: 1
      save_on_train_epoch_end: null
      enable_version_counter: true
  fast_dev_run: false
  max_epochs: 10
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: null
  limit_test_batches: null
  limit_predict_batches: null
  overfit_batches: 0.0
  val_check_interval: null
  check_val_every_n_epoch: 1
  num_sanity_val_steps: null
  log_every_n_steps: 50
  enable_checkpointing: null
  enable_progress_bar: null
  enable_model_summary: null
  accumulate_grad_batches: 1
  gradient_clip_val: 1
  gradient_clip_algorithm: null
  deterministic: null
  benchmark: null
  inference_mode: true
  use_distributed_sampler: true
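  # Profiler: PyTorchProfiler with Chrome trace export; memory profiling is
  # enabled via `profile_memory: true` under dict_kwargs.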
  profiler:
    class_path: lightning.pytorch.profilers.PyTorchProfiler
    init_args:
      dirpath: null
      filename: null
      group_by_input_shapes: false
      emit_nvtx: false
      export_to_chrome: true
      row_limit: 20
      sort_by_key: null
      record_module_names: true
      table_kwargs: null
      record_shapes: false
    dict_kwargs:
      profile_memory: true
  detect_anomaly: false
  barebones: false
  plugins: null
  sync_batchnorm: false
  reload_dataloaders_every_n_epochs: 0
  default_root_dir: modelgenerator/logs
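# Task: sequence-level binary classification. The rnafm backbone is adapted with
# LoRA (r=32, alpha=64) on the attention query/value modules; an MLP head
# (512 -> 128) with CLS pooling produces the 2-class output.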
model:
  class_path: modelgenerator.tasks.SequenceClassification
  init_args:
    backbone:
      class_path: modelgenerator.backbones.rnafm
      init_args:
        from_scratch: false
        max_length: 1024
        use_peft: true
        save_peft_only: true
        lora_r: 32
        lora_alpha: 64
        lora_dropout: 0.1
        lora_target_modules:
        - query
        - value
        config_overwrites:
          hidden_dropout_prob: 0.1
          attention_probs_dropout_prob: 0.1
        model_init_args: null
    adapter:
      class_path: modelgenerator.adapters.MLPPoolAdapter
      init_args:
        pooling: cls_pooling
        hidden_sizes:
        - 512
        - 128
        bias: true
        dropout: 0.1
        dropout_in_middle: false
    n_classes: 2
    multilabel: false
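    # Optimization: AdamW at lr 2.5e-4 with a LinearWithWarmup schedule
    # (warmup over the first 1% of training steps).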
    optimizer:
      class_path: torch.optim.AdamW
      init_args:
        lr: 0.00025
        betas:
        - 0.9
        - 0.999
        eps: 1.0e-08
        weight_decay: 0.01
        amsgrad: false
        maximize: false
        foreach: null
        capturable: false
        differentiable: false
        fused: null
    lr_scheduler:
      class_path: modelgenerator.lr_schedulers.LinearWithWarmup
      init_args:
        warmup_ratio: 0.01
        num_warmup_steps: null
        last_epoch: -1
        verbose: deprecated
    use_legacy_adapter: false
    strict_loading: true
    reset_optimizer_states: false
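# Data: acceptor splice-site examples from the HuggingFace dataset
# genbio-ai/rna-downstream-tasks (config `splice_site_acceptor`), evaluated on
# the `test_danio` split; batch size 16.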
data:
  class_path: modelgenerator.data.SpliceSitePrediction
  init_args:
    path: genbio-ai/rna-downstream-tasks
    config_name: splice_site_acceptor
    test_split_name: test_danio
    class_filter: null
    train_split_files: null
    test_split_files: null
    valid_split_files: null
    test_split_size: 0
    valid_split_size: 0
    random_seed: 42
    batch_size: 16
    shuffle: true
    sampler: null
    num_workers: 0
    pin_memory: true
    persistent_workers: false
    cv_num_folds: 1
    cv_test_fold_id: 0
    cv_enable_val_fold: true
    cv_fold_id_col: null
ckpt_path: null