Commit 47a2486 (parent: fd930f2)
update processor kwargs
README.md CHANGED
@@ -87,7 +87,7 @@ def read_video_pyav(container, indices):
     return np.stack([x.to_ndarray(format="rgb24") for x in frames])
 
 
-# define a chat
+# define a chat history and use `apply_chat_template` to get correctly formatted prompt
 # Each value in "content" has to be a list of dicts with types ("text", "image", "video")
 conversation = [
     {
@@ -136,7 +136,7 @@ prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
 
 image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
 raw_image = Image.open(requests.get(image_file, stream=True).raw)
-inputs_image = processor(prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
+inputs_image = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
 
 output = model.generate(**inputs_video, max_new_tokens=100, do_sample=False)
 print(processor.decode(output[0][2:], skip_special_tokens=True))
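For context, the commit switches the image example from passing the prompt positionally to passing it via the `text=` keyword. Below is a minimal, self-contained sketch of the updated call pattern; the checkpoint id and the example conversation are illustrative assumptions rather than part of the commit, and the remaining lines mirror the README code shown in the diff.

```python
# Sketch of the post-commit usage. The model id is a placeholder (assumption);
# substitute the checkpoint that this README documents.
import requests
import torch
from PIL import Image
from transformers import AutoProcessor

model_id = "llava-hf/LLaVA-NeXT-Video-7B-hf"  # assumption: placeholder checkpoint
processor = AutoProcessor.from_pretrained(model_id)

# Each value in "content" is a list of dicts with types ("text", "image", "video"),
# as the README comment describes. This example conversation is illustrative.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is shown in this image?"},
            {"type": "image"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
raw_image = Image.open(requests.get(image_file, stream=True).raw)

# Updated kwarg: the prompt is now passed as `text=` instead of positionally.
inputs_image = processor(text=prompt, images=raw_image, return_tensors="pt").to(0, torch.float16)
```

The resulting `inputs_image` can then be unpacked into `model.generate(...)` as in the README's final lines.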