lixinhao committed
Commit c7a5c8f · verified · 1 Parent(s): 4c75d1b

Update README.md

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -108,12 +108,13 @@ pip install flash-attn --no-build-isolation
 Then you could use our model:
 ```python
 from transformers import AutoModel, AutoTokenizer
+import torch
 
 # model setting
 model_path = 'OpenGVLab/VideoChat-Flash-Qwen2-7B_res224'
 
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda()
+model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(torch.bfloat16).cuda()
 image_processor = model.get_vision_tower().image_processor
 
 mm_llm_compress = False # use the global compress or not
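The net effect of the commit is to add the missing `import torch` and to load the model in bfloat16 via `.to(torch.bfloat16)` instead of fp16 via `.half()`; bf16 keeps fp32's exponent range at the same memory cost as fp16, so it is less prone to overflow during inference. Below is a minimal sketch of the post-commit loading code. The `torch.cuda.is_bf16_supported()` fallback is an illustrative addition for pre-Ampere GPUs, not part of the README itself:

```python
import torch
from transformers import AutoModel, AutoTokenizer

# model setting
model_path = 'OpenGVLab/VideoChat-Flash-Qwen2-7B_res224'

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# The commit switches fp16 (.half()) to bf16. This guard, which falls back
# to fp16 on GPUs without bf16 support, is an assumption added here and is
# not in the original snippet.
dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).to(dtype).cuda()
image_processor = model.get_vision_tower().image_processor

mm_llm_compress = False  # use the global compress or not
```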