Upload folder using huggingface_hub

- README.md +12 -12
- conversation.py +3 -3

README.md CHANGED

@@ -7,7 +7,7 @@ pipeline_tag: image-text-to-text
 
 [\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 InternVL 1.0 Paper\]](https://arxiv.org/abs/2312.14238) [\[📜 InternVL 1.5 Report\]](https://arxiv.org/abs/2404.16821)
 
-[\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) \[
+[\[🗨️ Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/)
 
 [切换至中文版](#简介)
 
@@ -19,7 +19,7 @@ We are excited to announce the release of InternVL 2.0, the latest addition to t
 
 Compared to the state-of-the-art open-source multimodal large language models, InternVL 2.0 surpasses most open-source models. It demonstrates competitive performance on par with proprietary commercial models across various capabilities, including document and chart comprehension, infographics QA, scene text understanding and OCR tasks, scientific and mathematical problem solving, as well as cultural understanding and integrated multimodal capabilities.
 
-InternVL 2.0 is trained with an 8k context window and utilizes training data consisting of long texts, multiple images, and videos, significantly improving its ability to handle these types of inputs compared to InternVL 1.5. For more details, please refer to our blog and GitHub.
+InternVL 2.0 is trained with an 8k context window and utilizes training data consisting of long texts, multiple images, and videos, significantly improving its ability to handle these types of inputs compared to InternVL 1.5. For more details, please refer to our [blog](https://internvl.github.io/blog/2024-07-02-InternVL-2.0/) and [GitHub](https://github.com/OpenGVLab/InternVL).
 
 | Model Name | Vision Part | Language Part | HF Link | MS Link |
 | :------------------: | :---------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------: | :--------------------------------------------------------------: | :--------------------------------------------------------------------: |

@@ -473,7 +473,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
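
The lines in this hunk come from the README's LMDeploy quick-start snippet. For context, a minimal end-to-end sketch of how they fit together is shown below, assuming lmdeploy with the TurboMind backend is installed; the `session_len=8192` setting is an assumption matching the model's 8k context window:

```python
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
from lmdeploy.vl import load_image

model = 'OpenGVLab/InternVL2-2B'
# The meta instruction (system prompt) introduced by this commit.
system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'

# Load a sample image and attach the system prompt via the chat template.
image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
chat_template_config = ChatTemplateConfig('internvl-internlm2')
chat_template_config.meta_instruction = system_prompt

# session_len=8192 is an assumed value matching the 8k context window.
pipe = pipeline(model, chat_template_config=chat_template_config,
                backend_config=TurbomindEngineConfig(session_len=8192))
response = pipe(('describe this image', image))
print(response.text)
```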

@@ -497,7 +497,7 @@ from lmdeploy.vl import load_image
 from lmdeploy.vl.constants import IMAGE_TOKEN
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,
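
This hunk's context imports `IMAGE_TOKEN`, which the README uses to build multi-image prompts. A hedged sketch of that pattern follows; the duplicated image URL and the prompt wording are placeholders rather than content taken from this diff:

```python
from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
from lmdeploy.vl import load_image
from lmdeploy.vl.constants import IMAGE_TOKEN

model = 'OpenGVLab/InternVL2-2B'
system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
chat_template_config = ChatTemplateConfig('internvl-internlm2')
chat_template_config.meta_instruction = system_prompt
pipe = pipeline(model, chat_template_config=chat_template_config,
                backend_config=TurbomindEngineConfig(session_len=8192))

# Placeholder image URLs; the same sample image is reused here for simplicity.
image_urls = [
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg',
    'https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg',
]
images = [load_image(url) for url in image_urls]

# Reference each image explicitly with IMAGE_TOKEN so their order in the prompt is unambiguous.
prompt = f'Image-1: {IMAGE_TOKEN}\nImage-2: {IMAGE_TOKEN}\ndescribe these two images'
response = pipe((prompt, images))
print(response.text)
```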

@@ -523,7 +523,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,

@@ -547,7 +547,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig, Genera
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,

@@ -568,7 +568,7 @@ To deploy InternVL2 as an API, please configure the chat template config first.
 ```json
 {
     "model_name":"internvl-internlm2",
-    "meta_instruction":"我是书生·万象,英文名是InternVL
+    "meta_instruction":"我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
     "stop_words":["<|im_start|>", "<|im_end|>"]
 }
 ```
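
A sketch of how this chat-template JSON would typically be written out and handed to the API server; the file name and the `--chat-template` flag mentioned in the comment are assumptions about lmdeploy's custom chat-template support, not part of this diff:

```python
import json

# Chat template carrying the new meta_instruction from this commit.
chat_template = {
    'model_name': 'internvl-internlm2',
    'meta_instruction': '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
    'stop_words': ['<|im_start|>', '<|im_end|>'],
}

# ensure_ascii=False keeps the Chinese meta_instruction readable in the file.
with open('chat_template.json', 'w', encoding='utf-8') as f:
    json.dump(chat_template, f, ensure_ascii=False, indent=4)

# Assumed server invocation (flag name may vary across lmdeploy versions):
#   lmdeploy serve api_server OpenGVLab/InternVL2-2B --chat-template chat_template.json
```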

@@ -772,7 +772,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 image = load_image('https://raw.githubusercontent.com/open-mmlab/mmdeploy/main/tests/data/tiger.jpeg')
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt

@@ -794,7 +794,7 @@ from lmdeploy.vl import load_image
 from lmdeploy.vl.constants import IMAGE_TOKEN
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,

@@ -819,7 +819,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,

@@ -843,7 +843,7 @@ from lmdeploy import pipeline, TurbomindEngineConfig, ChatTemplateConfig, Genera
 from lmdeploy.vl import load_image
 
 model = 'OpenGVLab/InternVL2-2B'
-system_prompt = '我是书生·万象,英文名是InternVL
+system_prompt = '我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。'
 chat_template_config = ChatTemplateConfig('internvl-internlm2')
 chat_template_config.meta_instruction = system_prompt
 pipe = pipeline(model, chat_template_config=chat_template_config,

@@ -864,7 +864,7 @@ print(sess.response.text)
 ```json
 {
     "model_name":"internvl-internlm2",
-    "meta_instruction":"我是书生·万象,英文名是InternVL
+    "meta_instruction":"我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
     "stop_words":["<|im_start|>", "<|im_end|>"]
 }
 ```

conversation.py CHANGED

@@ -339,7 +339,7 @@ register_conv_template(
         name='Hermes-2',
         system_template='<|im_start|>system\n{system_message}',
         # note: The new system prompt was not used here to avoid changes in benchmark performance.
-        # system_message='我是书生·万象,英文名是InternVL
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
         system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
         roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
         sep_style=SeparatorStyle.MPT,

@@ -360,7 +360,7 @@ register_conv_template(
         name='internlm2-chat',
         system_template='<|im_start|>system\n{system_message}',
         # note: The new system prompt was not used here to avoid changes in benchmark performance.
-        # system_message='我是书生·万象,英文名是InternVL
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
         system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
         roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
         sep_style=SeparatorStyle.MPT,

@@ -379,7 +379,7 @@ register_conv_template(
         name='phi3-chat',
         system_template='<|system|>\n{system_message}',
         # note: The new system prompt was not used here to avoid changes in benchmark performance.
-        # system_message='我是书生·万象,英文名是InternVL
+        # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
         system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
         roles=('<|user|>\n', '<|assistant|>\n'),
         sep_style=SeparatorStyle.MPT,
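
For orientation, a sketch of how one of the templates registered above would typically be consumed. It assumes conversation.py also exposes the usual FastChat-style `get_conv_template` helper alongside `register_conv_template`, and that the module is importable as `conversation`; neither detail is shown in this diff:

```python
# Hypothetical usage of the 'internlm2-chat' template registered above.
from conversation import get_conv_template

conv = get_conv_template('internlm2-chat')
# conv.system_message defaults to the Chinese system prompt set above;
# override it here if the new InternVL meta instruction is wanted instead.
conv.append_message(conv.roles[0], 'Describe the image in detail.')  # user turn
conv.append_message(conv.roles[1], None)  # empty assistant slot to be generated
prompt = conv.get_prompt()
# prompt should look roughly like:
#   <|im_start|>system\n...<|im_end|>\n<|im_start|>user\nDescribe the image in detail.<|im_end|>\n<|im_start|>assistant\n
print(prompt)
```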