KingNish committed
Commit 8e1b516
Parent: cd4b4e1

Update chatbot.py

Files changed (1)
chatbot.py +3 -2
chatbot.py CHANGED
@@ -27,8 +27,8 @@ import io # Add this import for working with image bytes
 
 # You can also use models that are commented below
 # model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
-# model_id = "llava-hf/llava-interleave-qwen-7b-hf"
-model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
+model_id = "llava-hf/llava-interleave-qwen-7b-hf"
+# model_id = "llava-hf/llava-interleave-qwen-7b-dpo-hf"
 processor = LlavaProcessor.from_pretrained(model_id)
 model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, use_flash_attention_2=True, low_cpu_mem_usage=True)
 model.to("cuda")
@@ -250,6 +250,7 @@ def model_inference( user_prompt, chat_history, web_search):
 
     if image.endswith(video_extensions):
         image = sample_frames(image)
+        print(len(image))
     image_tokens = "<image>" * int(len(image))
     prompt = f"<|im_start|>user {image_tokens}\n{user_prompt}<|im_end|><|im_start|>assistant"
 
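
For readers following along, here is a minimal sketch of how the three changed lines fit together. It assumes, as the diff implies, that sample_frames() returns a list of frames, so len(frames) is the count the new print() reports and also the number of <image> placeholders inserted into the prompt. build_video_prompt is a hypothetical helper added only for illustration, and the Flash Attention 2 flag from the original call is omitted here since it requires flash-attn to be installed.

import torch
from transformers import LlavaProcessor, LlavaForConditionalGeneration

# Checkpoint selected in this commit; the DPO variant remains available
# as a commented-out alternative in the file.
model_id = "llava-hf/llava-interleave-qwen-7b-hf"
processor = LlavaProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).to("cuda")

def build_video_prompt(frames, user_prompt):
    # One <image> placeholder per sampled frame; len(frames) is the value
    # the newly added print() logs before the prompt is built.
    image_tokens = "<image>" * len(frames)
    return f"<|im_start|>user {image_tokens}\n{user_prompt}<|im_end|><|im_start|>assistant"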