Vision-CAIR committed
Commit
b7bd78d
1 parent: 0c189dc

Push model using huggingface_hub.

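The commit message refers to the huggingface_hub push flow. For reference, a minimal sketch of an equivalent hub-level upload (the repo id comes from the code diff below; the local folder path, and the use of HfApi.upload_folder instead of the in-code PreTrainedModel.push_to_hub call, are assumptions for illustration):

    from huggingface_hub import HfApi

    api = HfApi()
    # Upload a local checkpoint folder containing config.json,
    # generation_config.json, and the safetensors shards touched here.
    api.upload_folder(
        folder_path="./minigpt4_video_checkpoint",  # assumed local path
        repo_id="Vision-CAIR/MiniGPT4-video-mistral-hf",
        commit_message="Push model using huggingface_hub.",
    )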
config.json CHANGED
@@ -35,7 +35,7 @@
  "remove_template": false,
  "token_pooling": true,
  "torch_dtype": "float32",
- "transformers_version": "4.42.3",
+ "transformers_version": "4.37.2",
  "use_grad_checkpoint": true,
  "use_grad_checkpoint_llm": true,
  "vit_model": "eva_clip_g",
generation_config.json CHANGED
@@ -1,4 +1,4 @@
 {
  "_from_model_config": true,
- "transformers_version": "4.42.3"
+ "transformers_version": "4.37.2"
 }
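Here "_from_model_config": true marks a generation config that was derived from the model config rather than authored separately. A minimal sketch of fetching it from the Hub (same repo id as above):

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained("Vision-CAIR/MiniGPT4-video-mistral-hf")
    print(gen_cfg.transformers_version)  # "4.37.2" after this commit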
mini_gpt4_llama_v2.py CHANGED
@@ -644,6 +644,7 @@ class MiniGPT4_Video(Blip2Base, PreTrainedModel):
             repetition_penalty=repetition_penalty,
             # stopping_criteria=stopping_criteria,
             # use_fastv=False,
+            use_cache=True,
         )
 
         answers = []
@@ -845,11 +846,11 @@ class MiniGPT4_Video(Blip2Base, PreTrainedModel):
         msg = model.load_state_dict(ckpt['model'], strict=False)
         # push the model to the hub with its metadata and config file
         model.to('cuda')
-        model.push_to_hub("Vision-CAIR/MiniGPT4-video-mistral-hf")
+        # model.push_to_hub("Vision-CAIR/MiniGPT4-video-mistral-hf")
         video_config = minigpt4_video_config(cfg)
         # video_config.save_pretrained("minigpt4_video_config")
         # print("Save Minigpt-4-LLM Config: minigpt4_video_config")
-        video_config.push_to_hub("Vision-CAIR/MiniGPT4-video-mistral-hf")
+        # video_config.push_to_hub("Vision-CAIR/MiniGPT4-video-mistral-hf")
         return model
 
 
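Two changes land here: the underlying generate() call now passes use_cache=True, and the in-code Hub pushes are commented out so that rebuilding the model from a checkpoint no longer re-uploads it. use_cache tells transformers' generate() to reuse cached key/value attention states across decoding steps instead of re-running attention over the whole prefix at every step. A self-contained sketch of the flag on a generic causal LM (the gpt2 model id is illustrative only, not part of this repo):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")            # illustrative model
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tok("A video of", return_tensors="pt")
    out = model.generate(
        **inputs,
        max_new_tokens=20,
        use_cache=True,  # reuse key/value states instead of recomputing per step
    )
    print(tok.decode(out[0], skip_special_tokens=True))

Commenting out the push_to_hub calls is the usual cleanup once a one-time upload has succeeded; otherwise every call that reconstructs the model would attempt another push to the Hub.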
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f3ca9f784f5486e3935dc30b7b6e6cf8bb4635f71098cc8b30b6a48cf914cc7
+oid sha256:23a0ae99f0ba8f23e67afc7c82027eb2ae2df03505b3b7f4cf2ca3af78cdac47
 size 4981849121
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46225a736e4a1e79a61f7f47fd1b3180e1ae0f7b6118abfc6907961f9b8ebda3
+oid sha256:2d877cf7c7a09380e9a42a11aa951dc1fcc83c48c03f37d14d7b7b8b3f3ae224
 size 4610750095
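These are Git LFS pointer files: the repo stores only the blob's SHA-256 (oid) and byte size, so a new oid at an unchanged size means the shard's contents changed byte-for-byte while staying the same length. A minimal sketch for verifying a downloaded shard against the new pointer (the local filename is an assumption about where the file was saved):

    import hashlib

    def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
        # Stream the file so multi-gigabyte shards are not loaded into memory.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            while block := f.read(chunk_size):
                h.update(block)
        return h.hexdigest()

    digest = sha256_of("model-00001-of-00003.safetensors")  # assumed local path
    assert digest == "23a0ae99f0ba8f23e67afc7c82027eb2ae2df03505b3b7f4cf2ca3af78cdac47"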