[INFO|2024-12-01 18:24:23] parser.py:355 >> Process rank: 1, device: cuda:1, n_gpu: 1, distributed training: True, compute dtype: torch.bfloat16
[WARNING|2024-12-01 18:24:23] logging.py:162 >> We recommend enabling `upcast_layernorm` in quantized training.
[WARNING|2024-12-01 18:24:23] logging.py:162 >> `ddp_find_unused_parameters` needs to be set to False for LoRA in DDP training.
[INFO|2024-12-01 18:24:23] parser.py:355 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: torch.bfloat16
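Note: the launcher applies `ddp_find_unused_parameters=False` automatically here. A minimal sketch of what the equivalent manual setting looks like with transformers (output_dir is taken from the save paths later in this log; this is a sketch, not the run's actual arguments):

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="saves/LLaVA-NeXT-Video-7B-Chat/lora",  # save root used later in this log
    bf16=True,                          # matches "compute dtype: torch.bfloat16" above
    ddp_find_unused_parameters=False,   # required for LoRA under DDP, per the warning
)
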
[INFO|2024-12-01 18:24:23] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/config.json
[INFO|2024-12-01 18:24:23] configuration_utils.py:746 >> Model config LlavaNextVideoConfig {
"_name_or_path": "llava-hf/LLaVA-NeXT-Video-7B-hf",
"architectures": [
"LlavaNextVideoForConditionalGeneration"
],
"ignore_index": -100,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_seq_length": 576,
"image_token_index": 32001,
"model_type": "llava_next_video",
"projector_hidden_act": "gelu",
"spatial_pool_mode": "average",
"spatial_pool_out_channels": 1024,
"spatial_pool_stride": 2,
"text_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "lmsys/vicuna-7b-v1.5",
"add_cross_attention": false,
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 1,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 11008,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 4096,
"min_length": 0,
"mlp_bias": false,
"model_type": "llama",
"no_repeat_ngram_size": 0,
"num_attention_heads": 32,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 32,
"num_key_value_heads": 32,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 0,
"prefix": null,
"pretraining_tp": 1,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 2.5,
"rope_type": "linear",
"type": "linear"
},
"rope_theta": 10000.0,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": false,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "float16",
"torchscript": false,
"type": "linear",
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"vocab_size": 32064
},
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.46.1",
"use_image_newline_parameter": true,
"video_seq_length": 288,
"video_token_index": 32000,
"vision_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "quick_gelu",
"hidden_size": 1024,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 336,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "clip_vision_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 16,
"num_beam_groups": 1,
"num_beams": 1,
"num_channels": 3,
"num_hidden_layers": 24,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 14,
"prefix": null,
"problem_type": null,
"projection_dim": 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"vocab_size": 32000
},
"vision_feature_layer": -2,
"vision_feature_select_strategy": "default"
}
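Note: the config fields above can be inspected programmatically; a minimal sketch using the model id from this log:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
print(cfg.text_config.rope_scaling)                # linear RoPE scaling, factor 2.5
print(cfg.image_seq_length, cfg.video_seq_length)  # 576 tokens per image, 288 per video
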
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer.model from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.model
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.json
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/added_tokens.json
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/special_tokens_map.json
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer_config.json
[INFO|2024-12-01 18:24:23] tokenization_utils_base.py:2475 >> Special tokens have been added to the vocabulary; make sure the associated word embeddings are fine-tuned or trained.
[INFO|2024-12-01 18:24:24] processing_utils.py:695 >> loading configuration file processor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/processor_config.json
[INFO|2024-12-01 18:24:24] image_processing_base.py:375 >> loading configuration file preprocessor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/preprocessor_config.json
[INFO|2024-12-01 18:24:24] image_processing_base.py:429 >> Image processor LlavaNextVideoImageProcessor {
"crop_size": {
"height": 336,
"width": 336
},
"do_center_crop": true,
"do_convert_rgb": true,
"do_normalize": true,
"do_pad": true,
"do_rescale": true,
"do_resize": true,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_mean": [
0.48145466,
0.4578275,
0.40821073
],
"image_processor_type": "LlavaNextVideoImageProcessor",
"image_std": [
0.26862954,
0.26130258,
0.27577711
],
"processor_class": "LlavaNextVideoProcessor",
"resample": 3,
"rescale_factor": 0.00392156862745098,
"size": {
"shortest_edge": 336
}
}
[INFO|2024-12-01 18:24:24] image_processing_base.py:375 >> loading configuration file preprocessor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/preprocessor_config.json
[INFO|2024-12-01 18:24:24] image_processing_base.py:429 >> Image processor LlavaNextImageProcessor {
"crop_size": {
"height": 336,
"width": 336
},
"do_center_crop": true,
"do_convert_rgb": true,
"do_normalize": true,
"do_pad": true,
"do_rescale": true,
"do_resize": true,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_mean": [
0.48145466,
0.4578275,
0.40821073
],
"image_processor_type": "LlavaNextImageProcessor",
"image_std": [
0.26862954,
0.26130258,
0.27577711
],
"processor_class": "LlavaNextVideoProcessor",
"resample": 3,
"rescale_factor": 0.00392156862745098,
"size": {
"shortest_edge": 336
}
}
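Note: image_grid_pinpoints drives LLaVA-NeXT's "anyres" tiling: each image is matched to the best-fitting grid from the list and cut into 336x336 tiles alongside a base 336x336 view. A minimal sketch (the stand-in image is an assumption):

from PIL import Image
from transformers import LlavaNextImageProcessor

proc = LlavaNextImageProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
img = Image.new("RGB", (640, 480))            # stand-in image
out = proc(images=img, return_tensors="pt")
print(out["pixel_values"].shape)              # (1, num_patches, 3, 336, 336)
print(out["image_sizes"])                     # original size, used to unpad features
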
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer.model from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.model
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer.json
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file added_tokens.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/added_tokens.json
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file special_tokens_map.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/special_tokens_map.json
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2211 >> loading file tokenizer_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/tokenizer_config.json
[INFO|2024-12-01 18:24:25] tokenization_utils_base.py:2475 >> Special tokens have been added to the vocabulary; make sure the associated word embeddings are fine-tuned or trained.
[INFO|2024-12-01 18:24:25] processing_utils.py:695 >> loading configuration file processor_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/processor_config.json
[WARNING|2024-12-01 18:24:25] processing_utils.py:1005 >> Some kwargs in processor config are unused and will not have any effect: num_additional_image_tokens.
[INFO|2024-12-01 18:24:25] processing_utils.py:755 >> Processor LlavaNextVideoProcessor:
- video_processor: LlavaNextVideoImageProcessor {
"crop_size": {
"height": 336,
"width": 336
},
"do_center_crop": true,
"do_convert_rgb": true,
"do_normalize": true,
"do_pad": true,
"do_rescale": true,
"do_resize": true,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_mean": [
0.48145466,
0.4578275,
0.40821073
],
"image_processor_type": "LlavaNextVideoImageProcessor",
"image_std": [
0.26862954,
0.26130258,
0.27577711
],
"processor_class": "LlavaNextVideoProcessor",
"resample": 3,
"rescale_factor": 0.00392156862745098,
"size": {
"shortest_edge": 336
}
}
- image_processor: LlavaNextImageProcessor {
"crop_size": {
"height": 336,
"width": 336
},
"do_center_crop": true,
"do_convert_rgb": true,
"do_normalize": true,
"do_pad": true,
"do_rescale": true,
"do_resize": true,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_mean": [
0.48145466,
0.4578275,
0.40821073
],
"image_processor_type": "LlavaNextImageProcessor",
"image_std": [
0.26862954,
0.26130258,
0.27577711
],
"processor_class": "LlavaNextVideoProcessor",
"resample": 3,
"rescale_factor": 0.00392156862745098,
"size": {
"shortest_edge": 336
}
}
- tokenizer: LlamaTokenizerFast(name_or_path='llava-hf/LLaVA-NeXT-Video-7B-hf', vocab_size=32000, model_max_length=4096, is_fast=True, padding_side='left', truncation_side='right', special_tokens={'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<unk>'}, clean_up_tokenization_spaces=False), added_tokens_decoder={
0: AddedToken("<unk>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
1: AddedToken("<s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
2: AddedToken("</s>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
32000: AddedToken("<video>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
32001: AddedToken("<image>", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),
}
{
"image_token": "<image>",
"patch_size": 14,
"processor_class": "LlavaNextVideoProcessor",
"video_token": "<video>",
"vision_feature_select_strategy": "default"
}
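Note: a minimal usage sketch for the processor assembled above (the frames array and prompt are stand-ins):

import numpy as np
from transformers import LlavaNextVideoProcessor

processor = LlavaNextVideoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")
frames = np.zeros((8, 336, 336, 3), dtype=np.uint8)   # stand-in clip of 8 frames
prompt = "USER: <video>\nWhat is happening in this video? ASSISTANT:"
inputs = processor(text=prompt, videos=frames, return_tensors="pt")
print(inputs["input_ids"].shape)            # <video> (id 32000) expanded via patch_size
print(inputs["pixel_values_videos"].shape)  # (1, 8, 3, 336, 336)
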
[INFO|2024-12-01 18:24:25] logging.py:157 >> Loading dataset merger500.json...
[WARNING|2024-12-01 18:24:25] processing_utils.py:1005 >> Some kwargs in processor config are unused and will not have any effect: num_additional_image_tokens.
[INFO|2024-12-01 18:24:27] logging.py:157 >> Loading dataset LLM_dataset(4o).json...
[INFO|2024-12-01 18:24:27] logging.py:157 >> Loading dataset LLM_dataset(4mini).json...
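Note: the dataset files themselves are not shown in this log. A hypothetical entry shape, assuming LLaMA-Factory's sharegpt-style multimodal format (all field values invented for illustration):

example = {
    "messages": [
        {"role": "user", "content": "<video>Describe the driving scene."},
        {"role": "assistant", "content": "A car merges onto the highway from the right lane."},
    ],
    "videos": ["data/videos/clip_0001.mp4"],
}
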
[INFO|2024-12-01 18:25:48] configuration_utils.py:679 >> loading configuration file config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/config.json
[INFO|2024-12-01 18:25:48] configuration_utils.py:746 >> Model config LlavaNextVideoConfig {
"_name_or_path": "llava-hf/LLaVA-NeXT-Video-7B-hf",
"architectures": [
"LlavaNextVideoForConditionalGeneration"
],
"ignore_index": -100,
"image_grid_pinpoints": [
[
336,
672
],
[
672,
336
],
[
672,
672
],
[
1008,
336
],
[
336,
1008
]
],
"image_seq_length": 576,
"image_token_index": 32001,
"model_type": "llava_next_video",
"projector_hidden_act": "gelu",
"spatial_pool_mode": "average",
"spatial_pool_out_channels": 1024,
"spatial_pool_stride": 2,
"text_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "lmsys/vicuna-7b-v1.5",
"add_cross_attention": false,
"architectures": [
"LlamaForCausalLM"
],
"attention_bias": false,
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": 1,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": 2,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"head_dim": 128,
"hidden_act": "silu",
"hidden_size": 4096,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"initializer_range": 0.02,
"intermediate_size": 11008,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"length_penalty": 1.0,
"max_length": 20,
"max_position_embeddings": 4096,
"min_length": 0,
"mlp_bias": false,
"model_type": "llama",
"no_repeat_ngram_size": 0,
"num_attention_heads": 32,
"num_beam_groups": 1,
"num_beams": 1,
"num_hidden_layers": 32,
"num_key_value_heads": 32,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": 0,
"prefix": null,
"pretraining_tp": 1,
"problem_type": null,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"rms_norm_eps": 1e-05,
"rope_scaling": {
"factor": 2.5,
"rope_type": "linear",
"type": "linear"
},
"rope_theta": 10000.0,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": false,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": "float16",
"torchscript": false,
"type": "linear",
"typical_p": 1.0,
"use_bfloat16": false,
"use_cache": true,
"vocab_size": 32064
},
"tie_word_embeddings": false,
"torch_dtype": "bfloat16",
"transformers_version": "4.46.1",
"use_image_newline_parameter": true,
"video_seq_length": 288,
"video_token_index": 32000,
"vision_config": {
"_attn_implementation_autoset": false,
"_name_or_path": "",
"add_cross_attention": false,
"architectures": null,
"attention_dropout": 0.0,
"bad_words_ids": null,
"begin_suppress_tokens": null,
"bos_token_id": null,
"chunk_size_feed_forward": 0,
"cross_attention_hidden_size": null,
"decoder_start_token_id": null,
"diversity_penalty": 0.0,
"do_sample": false,
"early_stopping": false,
"encoder_no_repeat_ngram_size": 0,
"eos_token_id": null,
"exponential_decay_length_penalty": null,
"finetuning_task": null,
"forced_bos_token_id": null,
"forced_eos_token_id": null,
"hidden_act": "quick_gelu",
"hidden_size": 1024,
"id2label": {
"0": "LABEL_0",
"1": "LABEL_1"
},
"image_size": 336,
"initializer_factor": 1.0,
"initializer_range": 0.02,
"intermediate_size": 4096,
"is_decoder": false,
"is_encoder_decoder": false,
"label2id": {
"LABEL_0": 0,
"LABEL_1": 1
},
"layer_norm_eps": 1e-05,
"length_penalty": 1.0,
"max_length": 20,
"min_length": 0,
"model_type": "clip_vision_model",
"no_repeat_ngram_size": 0,
"num_attention_heads": 16,
"num_beam_groups": 1,
"num_beams": 1,
"num_channels": 3,
"num_hidden_layers": 24,
"num_return_sequences": 1,
"output_attentions": false,
"output_hidden_states": false,
"output_scores": false,
"pad_token_id": null,
"patch_size": 14,
"prefix": null,
"problem_type": null,
"projection_dim": 768,
"pruned_heads": {},
"remove_invalid_values": false,
"repetition_penalty": 1.0,
"return_dict": true,
"return_dict_in_generate": false,
"sep_token_id": null,
"suppress_tokens": null,
"task_specific_params": null,
"temperature": 1.0,
"tf_legacy_loss": false,
"tie_encoder_decoder": false,
"tie_word_embeddings": true,
"tokenizer_class": null,
"top_k": 50,
"top_p": 1.0,
"torch_dtype": null,
"torchscript": false,
"typical_p": 1.0,
"use_bfloat16": false,
"vocab_size": 32000
},
"vision_feature_layer": -2,
"vision_feature_select_strategy": "default"
}
[INFO|2024-12-01 18:25:48] logging.py:157 >> Quantizing model to 4 bit with bitsandbytes.
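Note: a minimal sketch of 4-bit loading with bitsandbytes; the compute dtype matches the log, while quant_type and double quantization are assumptions (the exact values are not recorded here):

import torch
from transformers import BitsAndBytesConfig, LlavaNextVideoForConditionalGeneration

bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # "compute dtype: torch.bfloat16" above
    bnb_4bit_quant_type="nf4",              # assumed
    bnb_4bit_use_double_quant=True,         # assumed
)
model = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=bnb
)
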
[INFO|2024-12-01 18:25:48] modeling_utils.py:3937 >> loading weights file model.safetensors from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/model.safetensors.index.json
[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating LlavaNextVideoForConditionalGeneration model under default dtype torch.bfloat16.
[INFO|2024-12-01 18:25:48] configuration_utils.py:1096 >> Generate config GenerationConfig {}
[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating CLIPVisionModel model under default dtype torch.bfloat16.
[INFO|2024-12-01 18:25:48] modeling_utils.py:1670 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
[INFO|2024-12-01 18:25:48] configuration_utils.py:1096 >> Generate config GenerationConfig {
"bos_token_id": 1,
"eos_token_id": 2,
"pad_token_id": 0
}
[INFO|2024-12-01 18:25:52] modeling_utils.py:4800 >> All model checkpoint weights were used when initializing LlavaNextVideoForConditionalGeneration.
[INFO|2024-12-01 18:25:52] modeling_utils.py:4808 >> All the weights of LlavaNextVideoForConditionalGeneration were initialized from the model checkpoint at llava-hf/LLaVA-NeXT-Video-7B-hf.
If your task is similar to the task the model of the checkpoint was trained on, you can already use LlavaNextVideoForConditionalGeneration for predictions without further training.
[INFO|2024-12-01 18:25:52] configuration_utils.py:1051 >> loading configuration file generation_config.json from cache at /home/dl/.cache/huggingface/hub/models--llava-hf--LLaVA-NeXT-Video-7B-hf/snapshots/b3b624d0915bb487ef1abb15255aaa2cd5581205/generation_config.json
[INFO|2024-12-01 18:25:52] configuration_utils.py:1096 >> Generate config GenerationConfig {
"bos_token_id": 1,
"eos_token_id": 2,
"pad_token_id": 0
}
[INFO|2024-12-01 18:25:53] logging.py:157 >> Gradient checkpointing enabled.
[INFO|2024-12-01 18:25:53] logging.py:157 >> Casting multimodal projector outputs in torch.bfloat16.
[INFO|2024-12-01 18:25:53] logging.py:157 >> Using FlashAttention-2 for faster training and inference.
[INFO|2024-12-01 18:25:53] logging.py:157 >> Upcasting trainable params to float32.
[INFO|2024-12-01 18:25:53] logging.py:157 >> Fine-tuning method: LoRA
[INFO|2024-12-01 18:25:53] logging.py:157 >> Found linear modules: down_proj,v_proj,k_proj,q_proj,up_proj,o_proj,gate_proj
[INFO|2024-12-01 18:25:53] logging.py:157 >> trainable params: 19,988,480 || all params: 7,083,419,648 || trainable%: 0.2822
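Note: the parameter count pins down the LoRA rank. With the seven target modules above on a 32-layer Llama-7B, trainable params = r * (4*(4096+4096) + 3*(4096+11008)) * 32 = r * 2,498,560, and 19,988,480 / 2,498,560 = 8, so r = 8. A minimal sketch with peft (lora_alpha and dropout are assumptions; `model` is the quantized model from the sketch above):

from peft import LoraConfig, get_peft_model

lora = LoraConfig(
    r=8, lora_alpha=16, lora_dropout=0.0, task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)
model = get_peft_model(model, lora)
model.print_trainable_parameters()  # trainable params: 19,988,480 || ...
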
[INFO|2024-12-01 18:25:53] trainer.py:698 >> Using auto half precision backend
[INFO|2024-12-01 18:25:55] trainer.py:2313 >> ***** Running training *****
[INFO|2024-12-01 18:25:55] trainer.py:2314 >> Num examples = 714
[INFO|2024-12-01 18:25:55] trainer.py:2315 >> Num Epochs = 1
[INFO|2024-12-01 18:25:55] trainer.py:2316 >> Instantaneous batch size per device = 2
[INFO|2024-12-01 18:25:55] trainer.py:2319 >> Total train batch size (w. parallel, distributed & accumulation) = 32
[INFO|2024-12-01 18:25:55] trainer.py:2320 >> Gradient Accumulation steps = 8
[INFO|2024-12-01 18:25:55] trainer.py:2321 >> Total optimization steps = 22
[INFO|2024-12-01 18:25:55] trainer.py:2322 >> Number of trainable parameters = 19,988,480
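Note: the banner's numbers are consistent: 2 per device x 2 GPUs (ranks 0 and 1 above) x 8 accumulation steps = 32 total batch size, and one epoch over 714 examples floors to 22 optimization steps:

per_device, n_gpus, accum, n_examples = 2, 2, 8, 714
total_batch = per_device * n_gpus * accum                                 # 32
batches_per_rank = (n_examples // n_gpus + per_device - 1) // per_device  # 179
opt_steps = max(batches_per_rank // accum, 1)                             # 22, as logged
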
[WARNING|2024-12-01 18:25:59] logging.py:168 >> `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
[INFO|2024-12-01 18:28:23] logging.py:157 >> {'loss': 0.8615, 'learning_rate': 2.5000e-06, 'epoch': 0.22}
[INFO|2024-12-01 18:30:41] logging.py:157 >> {'loss': 0.8940, 'learning_rate': 5.0000e-06, 'epoch': 0.45}
[INFO|2024-12-01 18:33:04] logging.py:157 >> {'loss': 0.8808, 'learning_rate': 7.5000e-06, 'epoch': 0.67}
[INFO|2024-12-01 18:35:22] logging.py:157 >> {'loss': 0.8746, 'learning_rate': 1.0000e-05, 'epoch': 0.89}
[INFO|2024-12-01 18:36:12] trainer.py:3801 >> Saving model checkpoint to saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/tokenizer_config.json
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/special_tokens_map.json
[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/preprocessor_config.json
[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/preprocessor_config.json
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/tokenizer_config.json
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/special_tokens_map.json
[INFO|2024-12-01 18:36:13] processing_utils.py:541 >> chat template saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/chat_template.json
[INFO|2024-12-01 18:36:13] processing_utils.py:547 >> processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/checkpoint-22/processor_config.json
[INFO|2024-12-01 18:36:13] trainer.py:2584 >>
Training completed. Do not forget to share your model on huggingface.co/models =)
[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/preprocessor_config.json
[INFO|2024-12-01 18:36:13] image_processing_base.py:258 >> Image processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/preprocessor_config.json
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/tokenizer_config.json
[INFO|2024-12-01 18:36:13] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/special_tokens_map.json
[INFO|2024-12-01 18:36:14] processing_utils.py:541 >> chat template saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/chat_template.json
[INFO|2024-12-01 18:36:14] processing_utils.py:547 >> processor saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/processor_config.json
[INFO|2024-12-01 18:36:14] trainer.py:3801 >> Saving model checkpoint to saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24
[INFO|2024-12-01 18:36:15] tokenization_utils_base.py:2646 >> tokenizer config file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/tokenizer_config.json
[INFO|2024-12-01 18:36:15] tokenization_utils_base.py:2655 >> Special tokens file saved in saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24/special_tokens_map.json
[WARNING|2024-12-01 18:36:15] logging.py:162 >> No metric eval_loss to plot.
[WARNING|2024-12-01 18:36:15] logging.py:162 >> No metric eval_accuracy to plot.
[INFO|2024-12-01 18:36:15] modelcard.py:449 >> Dropping the following result as it does not have all the necessary fields:
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
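
Note: a minimal inference sketch that loads the LoRA adapter saved above on top of the full-precision base model (torch_dtype and device_map are assumptions):

import torch
from peft import PeftModel
from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor

adapter_dir = "saves/LLaVA-NeXT-Video-7B-Chat/lora/train_2024-12-01-18-22-24"
base = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, adapter_dir)
processor = LlavaNextVideoProcessor.from_pretrained(adapter_dir)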