impossibleexchange committed on
Commit
ff38c31
1 Parent(s): b120dfb

Upload training_config.yml with huggingface_hub

Browse files
Files changed (1) hide show
  1. training_config.yml +87 -0
training_config.yml ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ _component_: models.lora_mmllama3_8b
3
+ lora_attn_modules:
4
+ - q_proj
5
+ - v_proj
6
+ apply_lora_to_mlp: false
7
+ apply_lora_to_output: false
8
+ lora_rank: 32
9
+ lora_alpha: 64
10
+ perception_tokens: 2
11
+ use_clip: false
12
+ tokenizer:
13
+ _component_: models.a2a_tokenizer
14
+ path: models/tokenizer.model
15
+ checkpointer:
16
+ _component_: torchtune.utils.FullModelMetaCheckpointer
17
+ checkpoint_dir: smtst2/
18
+ checkpoint_files:
19
+ - meta_model_1.pt
20
+ adapter_checkpoint: null
21
+ recipe_checkpoint: null
22
+ output_dir: output_checkpoints/experiment_1
23
+ model_type: LLAMA3
24
+ resume_from_checkpoint: false
25
+ interim_checkpoint_steps: 10000
26
+ interim_gen_steps: null
27
+ max_new_tokens: 100
28
+ temperature: 0.6
29
+ top_k: 300
30
+ dataset:
31
+ _component_: ds.EvenBatcher
32
+ buffer_size: 1
33
+ dataset:
34
+ _component_: ds.RoundRobinDataset
35
+ datasets:
36
+ - _component_: ds.OmegaVideoCaptionDataset
37
+ length: 500000
38
+ - _component_: ds.LlavaInstructDataset
39
+ dataset_path: ds/coco_llava_instruct/output.parquet
40
+ train_on_input: false
41
+ - _component_: ds.LlavaInstructDataset
42
+ dataset_path: ds/vision_flan/output.parquet
43
+ train_on_input: false
44
+ - _component_: ds.CaptionInstructDataset
45
+ dataset_path: ds/sam_llava/output.parquet
46
+ train_on_input: false
47
+ # - _component_: ds.BagelLlama3Dataset
48
+ # parquet_path: ds/bagel-llama-3-v1.0/bagel-input-output-v1.0.parquet
49
+ # train_on_input: false
50
+ seed: null
51
+ shuffle: true
52
+ batch_size: 64
53
+ optimizer:
54
+ _component_: torch.optim.AdamW
55
+ weight_decay: 1.5 # NOTE(review): far above the AdamW default (0.01) — confirm intentional
56
+ lr: 1.0 # NOTE(review): unusually high for AdamW (default 1e-3); verify against the cosine warmup schedule below
57
+ lr_scheduler:
58
+ _component_: torchtune.modules.get_cosine_schedule_with_warmup
59
+ num_warmup_steps: 1000
60
+ loss:
61
+ _component_: torch.nn.CrossEntropyLoss
62
+ epochs: 3
63
+ max_steps_per_epoch: null
64
+ gradient_accumulation_steps: 192
65
+ compile: false
66
+ output_dir: /tmp/lora_finetune_output
67
+ metric_logger:
68
+ _component_: torchtune.utils.metric_logging.DiskLogger
69
+ log_dir: ${output_dir}
70
+ log_every_n_steps: null
71
+ device: cuda
72
+ dtype: bf16
73
+ enable_activation_checkpointing: false
74
+ profiler:
75
+ _component_: torchtune.utils.profiler
76
+ enabled: false
77
+ inference:
78
+ prompt_template: 'Video:
79
+
80
+ {video}
81
+
82
+ Caption the previous video.'
83
+ max_new_tokens: 300
84
+ temperature: 0.6
85
+ top_k: 300
86
+ quantizer: null
87
+ gradient-accumulation-steps: 32 # NOTE(review): hyphenated key — likely ignored by the loader and conflicts with gradient_accumulation_steps: 192 above; confirm which value is intended