import torch
from transformers import AutoModel, AutoTokenizer

from utils import load_image, load_video

if __name__ == "__main__":
    model_path, revision = 'morpheushoc/InternVL2_5-2B', 'main'

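    # Load the checkpoint in bfloat16 with FlashAttention enabled, pinned to the
    # given revision; this assumes a CUDA GPU with enough memory for the 2B model.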
    model = AutoModel.from_pretrained(model_path,
                                      torch_dtype=torch.bfloat16,
                                      load_in_8bit=False,
                                      low_cpu_mem_usage=True,
                                      use_flash_attn=True,
                                      trust_remote_code=True,
                                      revision=revision).eval().cuda()
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True, use_fast=False)

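    # Greedy decoding (no sampling), capped at 1024 new tokens per response.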
    generation_config = dict(max_new_tokens=1024, do_sample=False)

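    # Input files (images and one video), each paired with the prompt at the same index in `questions`.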
    paths = [
        'image1.jpg',
        'image1.jpg',
        'image2.jpg',
        'red-panda.mp4',
    ]

    questions = [
        'describe this image',
        'describe this image',
        'describe this image',
        'describe this video',
    ]

    for fp, question in zip(paths, questions):
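        # Videos: sample 8 frames (one tile each); images: tile into at most 12 patches.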
        if fp.endswith('mp4'):
            pixel_values, num_patches_list = load_video(fp, num_segments=8, max_num=1)
            prefix = ''.join([f'Frame{i+1}: <image>\n' for i in range(len(num_patches_list))])
        else:
            pixel_values = load_image(fp, max_num=12)
            num_patches_list = [len(pixel_values)]
            prefix = '<image>\n'

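        # Prepend the <image>/frame placeholders, move the pixels to the GPU, and run the first turn.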
        question = prefix + question
        pixel_values = pixel_values.to(torch.bfloat16).cuda()
        response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                       num_patches_list=num_patches_list, history=None, return_history=True)
        print(f'User: {question}\nAssistant: {response}')

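        # Follow-up turn on the same input: pass the history back in, and pass num_patches_list
        # again so the <image> placeholders keep matching the number of tiles.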
        question = 'How many animals?'
        response, history = model.chat(tokenizer, pixel_values, question, generation_config,
                                       num_patches_list=num_patches_list, history=history, return_history=True)
        print(f'User: {question}\nAssistant: {response}')