v1
- .vscode/launch.json +15 -0
- README.md +4 -4
- app.py +141 -0
- config.py +5 -0
- model/arch_0_5b/configuration_intern_vit.py +114 -0
- model/arch_0_5b/configuration_phantom.py +81 -0
- model/arch_0_5b/modeling_intern_vit.py +430 -0
- model/arch_0_5b/modeling_phantom.py +176 -0
- model/arch_0_5b/modeling_qwen2.py +1688 -0
- model/arch_1_8b/configuration_intern_vit.py +114 -0
- model/arch_1_8b/configuration_internlm2.py +150 -0
- model/arch_1_8b/configuration_phantom.py +82 -0
- model/arch_1_8b/modeling_intern_vit.py +430 -0
- model/arch_1_8b/modeling_internlm2.py +1488 -0
- model/arch_1_8b/modeling_phantom.py +176 -0
- model/arch_1_8b/tokenization_internlm2.py +235 -0
- model/arch_3_8b/configuration_intern_vit.py +114 -0
- model/arch_3_8b/configuration_phantom.py +82 -0
- model/arch_3_8b/configuration_phi3.py +211 -0
- model/arch_3_8b/modeling_intern_vit.py +430 -0
- model/arch_3_8b/modeling_phantom.py +177 -0
- model/arch_3_8b/modeling_phi3.py +1683 -0
- model/arch_7b/configuration_intern_vit.py +114 -0
- model/arch_7b/configuration_internlm2.py +150 -0
- model/arch_7b/configuration_phantom.py +82 -0
- model/arch_7b/modeling_intern_vit.py +430 -0
- model/arch_7b/modeling_internlm2.py +1487 -0
- model/arch_7b/modeling_phantom.py +176 -0
- model/arch_7b/tokenization_internlm2.py +235 -0
- model/load_model.py +107 -0
- requirements.txt +20 -0
- utils/__init__.py +0 -0
- utils/ddp_accel.yaml +16 -0
- utils/ds_accel.yaml +23 -0
- utils/utils.py +251 -0
.vscode/launch.json
ADDED
@@ -0,0 +1,15 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Current File",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal"
        }
    ]
}
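This configuration debugs whichever file is currently open in the editor. For reference, the same debugpy transport can also be driven from inside a script; a minimal sketch (assuming `pip install debugpy`; port 5678 is only a conventional choice, nothing in this config pins it):

# Sketch: make a running script wait for the VS Code debugger to attach,
# instead of launching it from launch.json. Requires `pip install debugpy`.
import debugpy

debugpy.listen(5678)       # open a debug adapter endpoint
debugpy.wait_for_client()  # block until VS Code attaches
debugpy.breakpoint()       # programmatic breakpoint once attached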
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
 title: Phantom
-emoji:
-colorFrom:
-colorTo:
+emoji: ⛰️
+colorFrom: yellow
+colorTo: purple
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.36.1
 app_file: app.py
 pinned: false
 license: mit
app.py
ADDED
@@ -0,0 +1,141 @@
# A100 Zero GPU
import spaces

# flash attention
import subprocess
subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

# Phantom Package
import torch
from PIL import Image
from utils.utils import *
import torch.nn.functional as F
from model.load_model import load_model
from torchvision.transforms.functional import pil_to_tensor

# Gradio Package
import time
import gradio as gr
from threading import Thread
from accelerate import Accelerator
from transformers import TextIteratorStreamer

# accel
accel = Accelerator()

# loading models (1.8B / 3.8B / 7B)
model_1_8, tokenizer_1_8 = load_model(size='1.8b')
model_3_8, tokenizer_3_8 = load_model(size='3.8b')
model_7, tokenizer_7 = load_model(size='7b')

def threading_function(inputs, streamer, device, model, tokenizer, temperature, new_max_token, top_p):

    # propagation
    _inputs = model.eval_process(inputs=inputs,
                                 data='demo',
                                 tokenizer=tokenizer,
                                 device=device)
    generation_kwargs = _inputs
    generation_kwargs.update({'streamer': streamer})
    generation_kwargs.update({'do_sample': True})
    generation_kwargs.update({'max_new_tokens': new_max_token})
    generation_kwargs.update({'top_p': top_p})
    generation_kwargs.update({'temperature': temperature})
    generation_kwargs.update({'use_cache': True})
    return model.generate(**generation_kwargs)

# @spaces.GPU
def bot_streaming(message, history, link, temperature, new_max_token, top_p):

    # model selection
    if "1.8B" in link:
        model = model_1_8
        tokenizer = tokenizer_1_8
    elif "3.8B" in link:
        model = model_3_8
        tokenizer = tokenizer_3_8
    elif "7B" in link:
        model = model_7
        tokenizer = tokenizer_7

    # float32/float16 -> bfloat16 conversion
    for param in model.parameters():
        if 'float32' in str(param.dtype).lower() or 'float16' in str(param.dtype).lower():
            param.data = param.data.to(torch.bfloat16)

    # cpu -> gpu
    for param in model.parameters():
        if not param.is_cuda:
            param.data = param.to(accel.device)

    try:
        # prompt type -> input prompt
        if len(message['files']) == 1:
            # Image Load
            image = pil_to_tensor(Image.open(message['files'][0]).convert("RGB"))
            inputs = [{'image': image.to(accel.device), 'question': message['text']}]
        elif len(message['files']) > 1:
            raise Exception("No way!")
        else:
            inputs = [{'question': message['text']}]

        # Text Generation
        with torch.inference_mode():
            # kwargs
            streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)

            # Threading generation
            thread = Thread(target=threading_function, kwargs=dict(inputs=inputs,
                                                                   streamer=streamer,
                                                                   model=model,
                                                                   tokenizer=tokenizer,
                                                                   device=accel.device,
                                                                   temperature=temperature,
                                                                   new_max_token=new_max_token,
                                                                   top_p=top_p))
            thread.start()

            # generated text
            generated_text = ""
            for new_text in streamer:
                generated_text += new_text

            # Text decoding
            response = output_filtering(generated_text, model)

    except:
        response = "There may be an unsupported format (e.g., pdf, video, sound). Only a single image is supported in this version."

    # private log print
    text = message['text']
    files = message['files']
    print('-----------------------------')
    print(f'Link: {link}')
    print(f'Text: {text}')
    print(f'MM Files: {files}')
    print(f'Response: {response}')
    print('-----------------------------\n')

    buffer = ""
    for character in response:
        buffer += character
        time.sleep(0.012)
        yield buffer

demo = gr.ChatInterface(fn=bot_streaming,
                        additional_inputs=[gr.Radio(["1.8B", "3.8B", "7B"], label="Size", info="Select one model size", value="7B"), gr.Slider(0, 1, 0.9, label="temperature"), gr.Slider(1, 1024, 128, label="new_max_token"), gr.Slider(0, 1, 0.95, label="top_p")],
                        additional_inputs_accordion="Generation Hyperparameters",
                        theme=gr.themes.Soft(),
                        title="Phantom",
                        description="Phantom is a family of super-efficient 0.5B, 1.8B, 3.8B, and 7B Large Language and Vision Models built on a new propagation strategy. "
                                    "Inference speed depends heavily on being assigned a non-scheduled GPU, so while all GPUs are busy, inference may stall indefinitely. "
                                    "Note that history-based conversation referring to previous dialogue is not supported.",
                        stop_btn="Stop Generation", multimodal=True)
demo.launch()
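Since bot_streaming is a plain generator, the streaming path above can be smoke-tested without the Gradio UI; a minimal sketch (assuming the models above loaded successfully, and that 'demo.jpg' is a hypothetical local image path):

# Drive bot_streaming directly; each yielded value is the response so far,
# revealed one character at a time. 'demo.jpg' is a placeholder path.
message = {'text': 'Describe this image.', 'files': ['demo.jpg']}
for partial in bot_streaming(message, history=[], link="7B",
                             temperature=0.9, new_max_token=128, top_p=0.95):
    pass
print(partial)  # the final, fully streamed response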
config.py
ADDED
@@ -0,0 +1,5 @@
# Checkpoints & Dataset root
MODEL_7B = "BK-Lee/Phantom-7B"
MODEL_3_8B = "BK-Lee/Phantom-3.8B"
MODEL_1_8B = "BK-Lee/Phantom-1.8B"
MODEL_0_5B = "BK-Lee/Phantom-0.5B"
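model/load_model.py (added in this commit but not shown in this excerpt) presumably resolves a size string to one of these Hub repo names; a sketch of that mapping (illustrative only, not the repo's actual loader):

# Illustrative mapping from the size strings used in app.py
# ('1.8b', '3.8b', '7b') to the checkpoint names defined above.
from config import MODEL_0_5B, MODEL_1_8B, MODEL_3_8B, MODEL_7B

SIZE_TO_REPO = {'0.5b': MODEL_0_5B, '1.8b': MODEL_1_8B,
                '3.8b': MODEL_3_8B, '7b': MODEL_7B}

def repo_for(size: str) -> str:
    return SIZE_TO_REPO[size]  # e.g. repo_for('7b') -> 'BK-Lee/Phantom-7B'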
model/arch_0_5b/configuration_intern_vit.py
ADDED
@@ -0,0 +1,114 @@
import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_300m'

    def __init__(
            self,
            num_channels=3,
            patch_size=14,
            image_size=224,
            qkv_bias=False,
            hidden_size=3200,
            num_attention_heads=25,
            intermediate_size=12800,
            qk_normalization=True,
            num_hidden_layers=48,
            use_flash_attn=True,
            hidden_act='gelu',
            norm_type='rms_norm',
            layer_norm_eps=1e-6,
            dropout=0.0,
            drop_path_rate=0.0,
            attention_dropout=0.0,
            initializer_range=0.02,
            initializer_factor=0.1,
            **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
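For orientation, the config can be instantiated directly; the 448-pixel override below is an assumption consistent with the 256-tokens-per-tile arithmetic used elsewhere in this commit, not a value taken from this file:

# Build the vision config with its class defaults, overriding image_size.
from model.arch_0_5b.configuration_intern_vit import InternVisionConfig

cfg = InternVisionConfig(image_size=448)
print(cfg.model_type)                           # 'intern_vit_300m'
print((cfg.image_size // cfg.patch_size) ** 2)  # 1024 patches per 448px tile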
model/arch_0_5b/configuration_phantom.py
ADDED
@@ -0,0 +1,81 @@
import copy

from transformers import LlamaConfig, Qwen2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

logger = logging.get_logger(__name__)


class PhantomConfig(PretrainedConfig):
    model_type = 'phantom'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            # default to the Qwen2 architecture used by this 0.5B variant, so the
            # `architectures` lookup below does not fail on an empty dict
            llm_config = {'architectures': ['Qwen2ForCausalLM']}
            logger.info('llm_config is None. Initializing the llm_config with default values (`Qwen2Config`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'Qwen2ForCausalLM':
            self.llm_config = Qwen2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
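A minimal sketch of composing the two sub-configs; the tiny hidden sizes are placeholders for illustration, not real checkpoint values:

# Compose a PhantomConfig from small stand-in dicts and round-trip it.
from model.arch_0_5b.configuration_phantom import PhantomConfig

cfg = PhantomConfig(
    vision_config={'hidden_size': 64, 'num_attention_heads': 4,
                   'intermediate_size': 128, 'num_hidden_layers': 2},
    llm_config={'architectures': ['Qwen2ForCausalLM'], 'hidden_size': 64},
    downsample_ratio=0.5,
)
print(cfg.to_dict()['llm_config']['architectures'])  # ['Qwen2ForCausalLM']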
model/arch_0_5b/modeling_intern_vit.py
ADDED
@@ -0,0 +1,430 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    try:  # v1
        from flash_attn.flash_attn_interface import \
            flash_attn_unpadded_qkvpacked_func
    except:  # v2
        from flash_attn.flash_attn_interface import \
            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

    from flash_attn.bert_padding import pad_input, unpad_input

    has_flash_attn = True
except:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
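A quick CPU smoke test of the encoder above with a deliberately tiny config (all sizes below are placeholders chosen so the forward pass runs in seconds; real checkpoints use the defaults from configuration_intern_vit.py):

import torch
from model.arch_0_5b.configuration_intern_vit import InternVisionConfig
from model.arch_0_5b.modeling_intern_vit import InternVisionModel

cfg = InternVisionConfig(hidden_size=64, num_attention_heads=4,
                         intermediate_size=128, num_hidden_layers=2,
                         image_size=56, patch_size=14,
                         use_flash_attn=False)  # force the naive attention path
model = InternVisionModel(cfg).eval()

pixels = torch.randn(1, 3, 56, 56)    # one 56x56 RGB image
out = model(pixel_values=pixels)
# 1 CLS token + (56 // 14) ** 2 = 16 patch tokens
print(out.last_hidden_state.shape)    # torch.Size([1, 17, 64])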
model/arch_0_5b/modeling_phantom.py
ADDED
@@ -0,0 +1,176 @@
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
from torch import nn
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel

from .configuration_phantom import PhantomConfig
from .modeling_intern_vit import InternVisionModel

from utils.utils import *
from model.arch_0_5b.modeling_qwen2 import Qwen2ForCausalLM

class PhantomForCausalLM(PreTrainedModel):
    config_class = PhantomConfig
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'Qwen2DecoderLayer']

    def __init__(self, config: PhantomConfig):
        super().__init__(config)
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio

        self.vision_model = InternVisionModel(config.vision_config)
        self.language_model = Qwen2ForCausalLM(config.llm_config)

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.vision_proj = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        # prompt rule
        self.prompt_rule = {
            "system_start": "<|im_start|>system\n",
            "system_end": "<|im_end|>",
            "user_start": "<|im_start|>user\n",
            "user_end": "<|im_end|>",
            "assistant_start": "<|im_start|>assistant\n",
            "assistant_end": "<|im_end|>",
            "test_start": "assistant\n",
            "test_end": "<|im_end|>",
            "split": "",
        }

    def eval_process(
            self,
            inputs,
            tokenizer,
            data,
            device,
    ):
        batched_image = []
        batched_qa_prompt = []
        batched_phantom_position = []
        for _input in inputs:

            # making image prompt
            if 'image' in _input.keys() and _input['image'] is not None:
                process_image = dynamic_preprocess(_input['image'].to(device))
                dynamic_process_image = torch.stack([dynamic_transform(image) for image in process_image]).to(device)
                img_token_number = dynamic_process_image.shape[0] * 256
                batched_image.append(dynamic_process_image)

            # make question and answer
            question = _input['question']

            # make instruction (qa pair) and label
            qa_prompt = make_instruction(question, data, self.prompt_rule)

            # adding image special tokens to question
            if 'image' in _input.keys():
                qa_prompt = qa_prompt.replace('<image>', '<img><IMG_CONTEXT></img>')

                # add bundle image tokens if it has <image> token
                qa_prompt = add_bundle_tokens(qa_prompt, '<IMG_CONTEXT>', img_token_number)

            # phantom_position
            label = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].to(device)
            phantom_position = torch.zeros_like(label)
            phantom_position[0] = 1

            # batched processing
            batched_qa_prompt.append(qa_prompt)
            batched_phantom_position.append(phantom_position.flip(dims=[0]))

        '''For Final Outputs'''
        qa_prompts = tokenizer(batched_qa_prompt, padding='longest', return_tensors="pt", add_special_tokens=False)

        # [1] input_ids
        input_ids = qa_prompts.input_ids.to(device)

        # [2] attention_mask
        attention_mask = qa_prompts.attention_mask.to(device)

        # [3] Phantom Position
        batched_phantom_position = torch.nn.utils.rnn.pad_sequence(batched_phantom_position, batch_first=True, padding_value=0).flip(dims=[1])  # padding left

        if len(batched_image):
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "pixel_values": torch.cat(batched_image, dim=0).to(device),
                    "phantom_position": batched_phantom_position.bool()
                    }
        else:
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "phantom_position": batched_phantom_position.bool()
                    }

    def extract_feature(self, pixel_values):
        vit_embeds = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=False,
            return_dict=True).last_hidden_state
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.vision_proj(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            input_ids: Optional[torch.FloatTensor] = None,
            attention_mask: Optional[torch.LongTensor] = None,
            phantom_position: torch.BoolTensor = None,
            generation_config: Optional[GenerationConfig] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            **generate_kwargs,
    ) -> torch.LongTensor:

        if pixel_values is not None:
            vit_embeds = self.extract_feature(pixel_values.to(torch.bfloat16))
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.config.image_token_index)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            phantom_position=phantom_position,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=True,
            pad_token_id=self.config.eos_token_id,
            eos_token_id=self.config.eos_token_id,
            **generate_kwargs,
        )

        return outputs
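The token bookkeeping above hinges on one identity: with 448-pixel tiles, 14-pixel patches, and downsample_ratio 0.5, each tile contributes (448 // 14)^2 * 0.5^2 = 256 LLM tokens, the same constant hard-coded as img_token_number = n_tiles * 256 in eval_process. A one-liner to check it (448 is an assumed tile size, chosen only because it is consistent with that constant):

# Per-tile visual token count, mirroring num_image_token in __init__.
image_size, patch_size, downsample_ratio = 448, 14, 0.5  # assumed values
print(int((image_size // patch_size) ** 2 * downsample_ratio ** 2))  # 256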
model/arch_0_5b/modeling_qwen2.py
ADDED
@@ -0,0 +1,1688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
|
5 |
+
# and OPT implementations in this library. It has been modified from its
|
6 |
+
# original forms to accommodate minor architectural differences compared
|
7 |
+
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
|
8 |
+
#
|
9 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
10 |
+
# you may not use this file except in compliance with the License.
|
11 |
+
# You may obtain a copy of the License at
|
12 |
+
#
|
13 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
14 |
+
#
|
15 |
+
# Unless required by applicable law or agreed to in writing, software
|
16 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
17 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
18 |
+
# See the License for the specific language governing permissions and
|
19 |
+
# limitations under the License.
|
20 |
+
"""PyTorch Qwen2 model."""
|
21 |
+
|
22 |
+
import inspect
|
23 |
+
import math
|
24 |
+
from typing import List, Optional, Tuple, Union
|
25 |
+
|
26 |
+
import torch
|
27 |
+
import torch.nn.functional as F
|
28 |
+
import torch.utils.checkpoint
|
29 |
+
from torch import nn
|
30 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
31 |
+
|
32 |
+
from transformers.activations import ACT2FN
|
33 |
+
from transformers.cache_utils import Cache, DynamicCache, StaticCache
|
34 |
+
from transformers.modeling_attn_mask_utils import (
|
35 |
+
AttentionMaskConverter,
|
36 |
+
)
|
37 |
+
from transformers.modeling_outputs import (
|
38 |
+
BaseModelOutputWithPast,
|
39 |
+
CausalLMOutputWithPast,
|
40 |
+
SequenceClassifierOutputWithPast,
|
41 |
+
TokenClassifierOutput,
|
42 |
+
)
|
43 |
+
from transformers.modeling_utils import PreTrainedModel
|
44 |
+
from transformers.utils import (
|
45 |
+
add_start_docstrings,
|
46 |
+
add_start_docstrings_to_model_forward,
|
47 |
+
is_flash_attn_2_available,
|
48 |
+
is_flash_attn_greater_or_equal_2_10,
|
49 |
+
logging,
|
50 |
+
replace_return_docstrings,
|
51 |
+
)
|
52 |
+
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
|
53 |
+
|
54 |
+
# Phantom
|
55 |
+
from utils.utils import *
|
56 |
+
|
57 |
+
if is_flash_attn_2_available():
|
58 |
+
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
59 |
+
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
60 |
+
|
61 |
+
_flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
|
62 |
+
|
63 |
+
|
64 |
+
logger = logging.get_logger(__name__)
|
65 |
+
|
66 |
+
|
67 |
+
_CHECKPOINT_FOR_DOC = "Qwen/Qwen2-7B-beta"
|
68 |
+
_CONFIG_FOR_DOC = "Qwen2Config"
|
69 |
+
|
70 |
+
|
71 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
72 |
+
def _get_unpad_data(attention_mask):
|
73 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
74 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
75 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
76 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
77 |
+
return (
|
78 |
+
indices,
|
79 |
+
cu_seqlens,
|
80 |
+
max_seqlen_in_batch,
|
81 |
+
)
|
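For reference, a minimal sketch (toy values, not from the repo) of what this helper computes for a left-padded batch; only torch is assumed:

import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[0, 1, 1, 1],
                               [0, 0, 1, 1]], dtype=torch.int32)
seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)        # tensor([3, 2])
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
# indices    -> tensor([1, 2, 3, 6, 7]): flat positions of the real tokens
# cu_seqlens -> tensor([0, 3, 5]): cumulative offsets consumed by flash_attn_varlen_func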
82 |
+
|
83 |
+
|
84 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2
|
85 |
+
class Qwen2RMSNorm(nn.Module):
|
86 |
+
def __init__(self, hidden_size, eps=1e-6):
|
87 |
+
"""
|
88 |
+
Qwen2RMSNorm is equivalent to T5LayerNorm
|
89 |
+
"""
|
90 |
+
super().__init__()
|
91 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
92 |
+
self.variance_epsilon = eps
|
93 |
+
|
94 |
+
def forward(self, hidden_states):
|
95 |
+
input_dtype = hidden_states.dtype
|
96 |
+
hidden_states = hidden_states.to(torch.float32)
|
97 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
98 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
99 |
+
return self.weight * hidden_states.to(input_dtype)
|
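A quick numeric sketch of what RMSNorm does (illustrative only, toy values): it rescales by the root mean square over the hidden dimension, with no mean subtraction, before applying the learned per-channel weight.

import torch

x = torch.tensor([[3.0, 4.0]])                 # rms = sqrt((9 + 16) / 2) = sqrt(12.5)
rms = x.pow(2).mean(-1, keepdim=True).sqrt()
print(x / rms)                                 # tensor([[0.8485, 1.1314]]), up to eps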
100 |
+
|
101 |
+
|
102 |
+
# Copied from transformers.models.mixtral.modeling_mixtral.MixtralRotaryEmbedding with Mixtral->Qwen2
|
103 |
+
class Qwen2RotaryEmbedding(nn.Module):
|
104 |
+
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
105 |
+
super().__init__()
|
106 |
+
|
107 |
+
self.dim = dim
|
108 |
+
self.max_position_embeddings = max_position_embeddings
|
109 |
+
self.base = base
|
110 |
+
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
111 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
112 |
+
|
113 |
+
# Build here to make `torch.jit.trace` work.
|
114 |
+
self._set_cos_sin_cache(
|
115 |
+
seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
|
116 |
+
)
|
117 |
+
|
118 |
+
def _set_cos_sin_cache(self, seq_len, device, dtype):
|
119 |
+
self.max_seq_len_cached = seq_len
|
120 |
+
t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
|
121 |
+
|
122 |
+
freqs = torch.outer(t, self.inv_freq)
|
123 |
+
# The permutation differs from the paper, but it yields the same result
|
124 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
125 |
+
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
|
126 |
+
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
|
127 |
+
|
128 |
+
def forward(self, x, seq_len=None):
|
129 |
+
# x: [bs, num_attention_heads, seq_len, head_size]
|
130 |
+
if seq_len > self.max_seq_len_cached:
|
131 |
+
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
|
132 |
+
|
133 |
+
return (
|
134 |
+
self.cos_cached[:seq_len].to(dtype=x.dtype),
|
135 |
+
self.sin_cached[:seq_len].to(dtype=x.dtype),
|
136 |
+
)
|
137 |
+
|
138 |
+
|
139 |
+
# Copied from transformers.models.llama.modeling_llama.rotate_half
|
140 |
+
def rotate_half(x):
|
141 |
+
"""Rotates half the hidden dims of the input."""
|
142 |
+
x1 = x[..., : x.shape[-1] // 2]
|
143 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
144 |
+
return torch.cat((-x2, x1), dim=-1)
|
145 |
+
|
146 |
+
|
147 |
+
# Copied from transformers.models.mixtral.modeling_mixtral.apply_rotary_pos_emb
|
148 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
|
149 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
150 |
+
|
151 |
+
Args:
|
152 |
+
q (`torch.Tensor`): The query tensor.
|
153 |
+
k (`torch.Tensor`): The key tensor.
|
154 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
155 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
156 |
+
position_ids (`torch.Tensor`):
|
157 |
+
The position indices of the tokens corresponding to the query and key tensors. For example, this can be
|
158 |
+
used to pass offsetted position ids when working with a KV-cache.
|
159 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
160 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
161 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
162 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
163 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
164 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
165 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
166 |
+
Returns:
|
167 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
168 |
+
"""
|
169 |
+
cos = cos[position_ids].unsqueeze(unsqueeze_dim)
|
170 |
+
sin = sin[position_ids].unsqueeze(unsqueeze_dim)
|
171 |
+
q_embed = (q * cos) + (rotate_half(q) * sin)
|
172 |
+
k_embed = (k * cos) + (rotate_half(k) * sin)
|
173 |
+
return q_embed, k_embed
|
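A shape-level sketch of how the rotary pieces above fit together (toy sizes, random values; assumes only the definitions in this file):

import torch

rotary = Qwen2RotaryEmbedding(dim=8, max_position_embeddings=32)
q = torch.randn(1, 2, 5, 8)                    # (bsz, num_heads, seq_len, head_dim)
k = torch.randn(1, 2, 5, 8)
cos, sin = rotary(q, seq_len=5)                # each of shape (5, 8)
position_ids = torch.arange(5).unsqueeze(0)    # (bsz, seq_len)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
assert q_rot.shape == q.shape and k_rot.shape == k.shape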
174 |
+
|
175 |
+
|
176 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2
|
177 |
+
class Qwen2MLP(nn.Module):
|
178 |
+
def __init__(self, config):
|
179 |
+
super().__init__()
|
180 |
+
self.hidden_size = config.hidden_size
|
181 |
+
self.intermediate_size = config.intermediate_size
|
182 |
+
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
183 |
+
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
184 |
+
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
185 |
+
self.act_fn = ACT2FN[config.hidden_act]
|
186 |
+
|
187 |
+
def forward(self, hidden_state):
|
188 |
+
return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
|
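The block is a SwiGLU MLP: down_proj(act(gate_proj(x)) * up_proj(x)). A toy instantiation for illustration (SimpleNamespace stands in for a real Qwen2Config; sizes are made up):

from types import SimpleNamespace
import torch

cfg = SimpleNamespace(hidden_size=16, intermediate_size=32, hidden_act="silu")
mlp = Qwen2MLP(cfg)
out = mlp(torch.randn(1, 5, 16))               # hidden size is preserved: (1, 5, 16)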
189 |
+
|
190 |
+
|
191 |
+
# Copied from transformers.models.llama.modeling_llama.repeat_kv
|
192 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
193 |
+
"""
|
194 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
195 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
196 |
+
"""
|
197 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
198 |
+
if n_rep == 1:
|
199 |
+
return hidden_states
|
200 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
201 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
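A toy sketch of the grouped-query duplication (illustrative only): with 2 KV heads serving 4 query heads, n_rep = 2 and each KV head appears twice.

import torch

kv = torch.randn(1, 2, 5, 8)                   # (bsz, num_kv_heads, seq_len, head_dim)
expanded = repeat_kv(kv, n_rep=2)              # -> (1, 4, 5, 8)
assert torch.equal(expanded[:, 0], expanded[:, 1])   # both copies of KV head 0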
202 |
+
|
203 |
+
|
204 |
+
class Qwen2Attention(nn.Module):
|
205 |
+
"""
|
206 |
+
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
|
207 |
+
and "Generating Long Sequences with Sparse Transformers".
|
208 |
+
"""
|
209 |
+
|
210 |
+
def __init__(self, config: Qwen2Config, layer_idx: Optional[int] = None):
|
211 |
+
super().__init__()
|
212 |
+
self.config = config
|
213 |
+
self.layer_idx = layer_idx
|
214 |
+
if layer_idx is None:
|
215 |
+
logger.warning_once(
|
216 |
+
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
|
217 |
+
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
|
218 |
+
"when creating this class."
|
219 |
+
)
|
220 |
+
|
221 |
+
self.hidden_size = config.hidden_size
|
222 |
+
self.num_heads = config.num_attention_heads
|
223 |
+
self.head_dim = self.hidden_size // self.num_heads
|
224 |
+
self.num_key_value_heads = config.num_key_value_heads
|
225 |
+
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
226 |
+
self.max_position_embeddings = config.max_position_embeddings
|
227 |
+
self.rope_theta = config.rope_theta
|
228 |
+
self.is_causal = True
|
229 |
+
self.attention_dropout = config.attention_dropout
|
230 |
+
|
231 |
+
if (self.head_dim * self.num_heads) != self.hidden_size:
|
232 |
+
raise ValueError(
|
233 |
+
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
234 |
+
f" and `num_heads`: {self.num_heads})."
|
235 |
+
)
|
236 |
+
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
|
237 |
+
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
|
238 |
+
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
|
239 |
+
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
|
240 |
+
|
241 |
+
self.rotary_emb = Qwen2RotaryEmbedding(
|
242 |
+
self.head_dim,
|
243 |
+
max_position_embeddings=self.max_position_embeddings,
|
244 |
+
base=self.rope_theta,
|
245 |
+
)
|
246 |
+
|
247 |
+
"""
|
248 |
+
Phantom
|
249 |
+
"""
|
250 |
+
# Phantom Init
|
251 |
+
self.turn_on_phantom = True
|
252 |
+
self.xattn_query_phantom = XAttention(self.head_dim)
|
253 |
+
self.xattn_key_phantom = XAttention(self.head_dim)
|
254 |
+
self.xattn_value_phantom = XAttention(self.head_dim)
|
255 |
+
self.gating_phantom_1 = nn.Linear(self.head_dim, 1)
|
256 |
+
self.gating_phantom_2 = nn.Linear(self.head_dim, 1)
|
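`XAttention` comes from `utils/utils.py` via the wildcard import above and is not shown in this diff. The calls in the forward passes below only assume an interface roughly like the following sketch; the layout and residual behavior here are assumptions, not the repo's actual implementation:

import torch
import torch.nn as nn

class XAttentionSketch(nn.Module):
    """Hypothetical stand-in: cross-attend q over a few phantom-token states."""
    def __init__(self, head_dim):
        super().__init__()
        self.scale = head_dim ** -0.5

    def forward(self, q, k, v, is_residual=False):
        # Assumed layout (flash path): (bsz, seq_len, num_heads, head_dim)
        qh, kh, vh = (t.permute(0, 2, 1, 3) for t in (q, k, v))
        attn = (qh @ kh.transpose(-2, -1) * self.scale).softmax(dim=-1)
        out = (attn @ vh).permute(0, 2, 1, 3)  # back to (bsz, seq_len, heads, dim)
        return out + q if is_residual else out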
257 |
+
|
258 |
+
|
259 |
+
def forward(
|
260 |
+
self,
|
261 |
+
hidden_states: torch.Tensor,
|
262 |
+
attention_mask: Optional[torch.Tensor] = None,
|
263 |
+
position_ids: Optional[torch.LongTensor] = None,
|
264 |
+
past_key_value: Optional[Cache] = None,
phantom_position: torch.BoolTensor = None,  # accepted for parity with the SDPA/flash subclasses; unused in the eager path
|
265 |
+
output_attentions: bool = False,
|
266 |
+
use_cache: bool = False,
|
267 |
+
cache_position: Optional[torch.LongTensor] = None,
|
268 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
269 |
+
bsz, q_len, _ = hidden_states.size()
|
270 |
+
|
271 |
+
query_states = self.q_proj(hidden_states)
|
272 |
+
key_states = self.k_proj(hidden_states)
|
273 |
+
value_states = self.v_proj(hidden_states)
|
274 |
+
|
275 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
276 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
277 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
278 |
+
|
279 |
+
kv_seq_len = key_states.shape[-2]
|
280 |
+
if past_key_value is not None:
|
281 |
+
if self.layer_idx is None:
|
282 |
+
raise ValueError(
|
283 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
284 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
285 |
+
"with a layer index."
|
286 |
+
)
|
287 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
288 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
289 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
290 |
+
|
291 |
+
if past_key_value is not None:
|
292 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
|
293 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
294 |
+
|
295 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
296 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
297 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
298 |
+
|
299 |
+
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
300 |
+
|
301 |
+
if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
302 |
+
raise ValueError(
|
303 |
+
f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
304 |
+
f" {attn_weights.size()}"
|
305 |
+
)
|
306 |
+
|
307 |
+
if attention_mask is not None: # no matter the length, we just slice it
|
308 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
309 |
+
attn_weights = attn_weights + causal_mask
|
310 |
+
|
311 |
+
# upcast attention to fp32
|
312 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
313 |
+
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
314 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
315 |
+
|
316 |
+
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
317 |
+
raise ValueError(
|
318 |
+
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
319 |
+
f" {attn_output.size()}"
|
320 |
+
)
|
321 |
+
|
322 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
323 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
324 |
+
|
325 |
+
attn_output = self.o_proj(attn_output)
|
326 |
+
|
327 |
+
if not output_attentions:
|
328 |
+
attn_weights = None
|
329 |
+
|
330 |
+
return attn_output, attn_weights, past_key_value
|
331 |
+
|
332 |
+
|
333 |
+
class Qwen2FlashAttention2(Qwen2Attention):
|
334 |
+
"""
|
335 |
+
Qwen2 flash attention module, following Qwen2 attention module. This module inherits from `Qwen2Attention`
|
336 |
+
as the weights of the module stay untouched. The only required change would be on the forward pass,
|
337 |
+
where it needs to correctly call the public API of flash attention and deal with padding tokens
|
338 |
+
in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
|
339 |
+
config.max_window_layers layers.
|
340 |
+
"""
|
341 |
+
|
342 |
+
# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
|
343 |
+
def __init__(self, *args, **kwargs):
|
344 |
+
super().__init__(*args, **kwargs)
|
345 |
+
|
346 |
+
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
|
347 |
+
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
|
348 |
+
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
|
349 |
+
self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
350 |
+
|
351 |
+
def forward(
|
352 |
+
self,
|
353 |
+
hidden_states: torch.Tensor,
|
354 |
+
attention_mask: Optional[torch.Tensor] = None,
|
355 |
+
position_ids: Optional[torch.LongTensor] = None,
|
356 |
+
past_key_value: Optional[Cache] = None,
|
357 |
+
phantom_position: torch.BoolTensor = None,
|
358 |
+
output_attentions: bool = False,
|
359 |
+
use_cache: bool = False,
|
360 |
+
cache_position: Optional[torch.LongTensor] = None,
|
361 |
+
):
|
362 |
+
bsz, q_len, _ = hidden_states.size()
|
363 |
+
|
364 |
+
query_states = self.q_proj(hidden_states)
|
365 |
+
key_states = self.k_proj(hidden_states)
|
366 |
+
value_states = self.v_proj(hidden_states)
|
367 |
+
|
368 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
369 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
370 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
371 |
+
|
372 |
+
kv_seq_len = key_states.shape[-2]
|
373 |
+
if past_key_value is not None:
|
374 |
+
if self.layer_idx is None:
|
375 |
+
raise ValueError(
|
376 |
+
f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
377 |
+
"for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
378 |
+
"with a layer index."
|
379 |
+
)
|
380 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
381 |
+
|
382 |
+
# Because the input can be padded, the absolute sequence length depends on the max position id.
|
383 |
+
rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
|
384 |
+
cos, sin = self.rotary_emb(value_states, seq_len=rotary_seq_len)
|
385 |
+
|
386 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
387 |
+
|
388 |
+
use_sliding_windows = (
|
389 |
+
_flash_supports_window_size
|
390 |
+
and getattr(self.config, "sliding_window", None) is not None
|
391 |
+
and kv_seq_len > self.config.sliding_window
|
392 |
+
and self.config.use_sliding_window
|
393 |
+
)
|
394 |
+
|
395 |
+
if not _flash_supports_window_size:
|
396 |
+
logger.warning_once(
|
397 |
+
"The current flash attention version does not support sliding window attention, for a more memory efficient implementation"
|
398 |
+
" make sure to upgrade flash-attn library."
|
399 |
+
)
|
400 |
+
|
401 |
+
if past_key_value is not None:
|
402 |
+
# Activate cache slicing only if the config has a `sliding_window` attribute
|
403 |
+
cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
|
404 |
+
if (
|
405 |
+
getattr(self.config, "sliding_window", None) is not None
|
406 |
+
and kv_seq_len > self.config.sliding_window
|
407 |
+
and cache_has_contents
|
408 |
+
):
|
409 |
+
slicing_tokens = 1 - self.config.sliding_window
|
410 |
+
|
411 |
+
past_key = past_key_value[self.layer_idx][0]
|
412 |
+
past_value = past_key_value[self.layer_idx][1]
|
413 |
+
|
414 |
+
past_key = past_key[:, :, slicing_tokens:, :].contiguous()
|
415 |
+
past_value = past_value[:, :, slicing_tokens:, :].contiguous()
|
416 |
+
|
417 |
+
if past_key.shape[-2] != self.config.sliding_window - 1:
|
418 |
+
raise ValueError(
|
419 |
+
f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
|
420 |
+
f" {past_key.shape}"
|
421 |
+
)
|
422 |
+
|
423 |
+
if attention_mask is not None:
|
424 |
+
attention_mask = attention_mask[:, slicing_tokens:]
|
425 |
+
attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
|
426 |
+
|
427 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
|
428 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
429 |
+
|
430 |
+
# repeat k/v heads if n_kv_heads < n_heads
|
431 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
432 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
433 |
+
dropout_rate = 0.0 if not self.training else self.attention_dropout
|
434 |
+
|
435 |
+
# In PEFT, we usually cast the layer norms to float32 for training stability,
|
436 |
+
# so the input hidden states get silently cast to float32. Hence, we need to
|
437 |
+
# cast them back to float16 just to be sure everything works as expected.
|
438 |
+
input_dtype = query_states.dtype
|
439 |
+
if input_dtype == torch.float32:
|
440 |
+
if torch.is_autocast_enabled():
|
441 |
+
target_dtype = torch.get_autocast_gpu_dtype()
|
442 |
+
# Handle the case where the model is quantized
|
443 |
+
elif hasattr(self.config, "_pre_quantization_dtype"):
|
444 |
+
target_dtype = self.config._pre_quantization_dtype
|
445 |
+
else:
|
446 |
+
target_dtype = self.q_proj.weight.dtype
|
447 |
+
|
448 |
+
logger.warning_once(
|
449 |
+
f"The input hidden states seems to be silently casted in float32, this might be related to"
|
450 |
+
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
451 |
+
f" {target_dtype}."
|
452 |
+
)
|
453 |
+
|
454 |
+
query_states = query_states.to(target_dtype)
|
455 |
+
key_states = key_states.to(target_dtype)
|
456 |
+
value_states = value_states.to(target_dtype)
|
457 |
+
|
458 |
+
# Reshape to the expected shape for Flash Attention
|
459 |
+
query_states = query_states.transpose(1, 2)
|
460 |
+
key_states = key_states.transpose(1, 2)
|
461 |
+
value_states = value_states.transpose(1, 2)
|
462 |
+
|
463 |
+
attn_output = self._flash_attention_forward(
|
464 |
+
query_states,
|
465 |
+
key_states,
|
466 |
+
value_states,
|
467 |
+
attention_mask,
|
468 |
+
q_len,
|
469 |
+
phantom_position,
|
470 |
+
dropout=dropout_rate,
|
471 |
+
use_sliding_windows=use_sliding_windows,
|
472 |
+
)
|
473 |
+
|
474 |
+
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
475 |
+
attn_output = self.o_proj(attn_output)
|
476 |
+
|
477 |
+
if not output_attentions:
|
478 |
+
attn_weights = None
|
479 |
+
|
480 |
+
return attn_output, attn_weights, past_key_value
|
481 |
+
|
482 |
+
def _flash_attention_forward(
|
483 |
+
self,
|
484 |
+
query_states,
|
485 |
+
key_states,
|
486 |
+
value_states,
|
487 |
+
attention_mask,
|
488 |
+
query_length,
|
489 |
+
phantom_position,
|
490 |
+
dropout=0.0,
|
491 |
+
softmax_scale=None,
|
492 |
+
use_sliding_windows=False,
|
493 |
+
):
|
494 |
+
"""
|
495 |
+
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
|
496 |
+
first unpads the input, then computes the attention scores and pads the final attention scores.
|
497 |
+
|
498 |
+
Args:
|
499 |
+
query_states (`torch.Tensor`):
|
500 |
+
Input query states to be passed to Flash Attention API
|
501 |
+
key_states (`torch.Tensor`):
|
502 |
+
Input key states to be passed to Flash Attention API
|
503 |
+
value_states (`torch.Tensor`):
|
504 |
+
Input value states to be passed to Flash Attention API
|
505 |
+
attention_mask (`torch.Tensor`):
|
506 |
+
The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
|
507 |
+
position of padding tokens and 1 for the position of non-padding tokens.
|
508 |
+
dropout (`float`):
|
509 |
+
Attention dropout
|
510 |
+
softmax_scale (`float`, *optional*):
|
511 |
+
The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
|
512 |
+
use_sliding_windows (`bool`, *optional*):
|
513 |
+
Whether to activate sliding window attention.
|
514 |
+
"""
|
515 |
+
|
516 |
+
"""
|
517 |
+
Phantom
|
518 |
+
"""
|
519 |
+
if self.turn_on_phantom:
|
520 |
+
|
521 |
+
# [Important] softmax_scale
|
522 |
+
softmax_scale = 1 / math.sqrt(query_states.shape[-1])
|
523 |
+
|
524 |
+
query_states_phantom = []
|
525 |
+
key_states_phantom = []
|
526 |
+
value_states_phantom = []
|
527 |
+
for index, pos in enumerate(phantom_position):
|
528 |
+
if query_states.shape[1] > 1:  # prefill only; decoding steps reuse the phantom states saved during prefill
|
529 |
+
query_states_phantom.append(query_states[index][pos])
|
530 |
+
key_states_phantom.append(key_states[index][pos])
|
531 |
+
value_states_phantom.append(value_states[index][pos])
|
532 |
+
|
533 |
+
# saving phantom qkv for inference
|
534 |
+
self.query_states_phantom = query_states_phantom
|
535 |
+
self.key_states_phantom = key_states_phantom
|
536 |
+
self.value_states_phantom = value_states_phantom
|
537 |
+
|
538 |
+
# phantom qkv: list to tensor
|
539 |
+
query_states_phantom = torch.stack(self.query_states_phantom)
|
540 |
+
key_states_phantom = torch.stack(self.key_states_phantom)
|
541 |
+
value_states_phantom = torch.stack(self.value_states_phantom)
|
542 |
+
|
543 |
+
# phantom qkv: expand the phantom-token states from a few positions to the full sequence (1 -> N)
|
544 |
+
query_states_phantom = self.xattn_query_phantom(q=query_states, k=query_states_phantom, v=query_states_phantom)
|
545 |
+
key_states_phantom = self.xattn_key_phantom(q=key_states, k=key_states_phantom, v=key_states_phantom)
|
546 |
+
value_states_phantom = self.xattn_value_phantom(q=value_states, k=value_states_phantom, v=value_states_phantom, is_residual=True)
|
547 |
+
|
548 |
+
# concat original qkv and phantom qkv along the head (hidden) dimension
|
549 |
+
query_states = torch.cat([query_states, query_states_phantom], dim=3)
|
550 |
+
key_states = torch.cat([key_states, key_states_phantom], dim=3)
|
551 |
+
value_states = torch.cat([value_states, value_states_phantom], dim=3)
|
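Note the dim=3 concatenation doubles head_dim rather than the sequence length; the gating after flash-attn folds it back. A one-line shape check with toy sizes:

import torch
a, b = torch.randn(1, 6, 4, 64), torch.randn(1, 6, 4, 64)
print(torch.cat([a, b], dim=3).shape)          # torch.Size([1, 6, 4, 128])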
552 |
+
|
553 |
+
if not self._flash_attn_uses_top_left_mask:
|
554 |
+
causal = self.is_causal
|
555 |
+
else:
|
556 |
+
# TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
|
557 |
+
causal = self.is_causal and query_length != 1
|
558 |
+
|
559 |
+
# Decide whether to use SWA or not by layer index.
|
560 |
+
if use_sliding_windows and self.layer_idx >= self.config.max_window_layers:
|
561 |
+
use_sliding_windows = False
|
562 |
+
|
563 |
+
# Contains at least one padding token in the sequence
|
564 |
+
if attention_mask is not None:
|
565 |
+
batch_size = query_states.shape[0]
|
566 |
+
query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
|
567 |
+
query_states, key_states, value_states, attention_mask, query_length
|
568 |
+
)
|
569 |
+
|
570 |
+
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
|
571 |
+
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
|
572 |
+
|
573 |
+
if not use_sliding_windows:
|
574 |
+
attn_output_unpad = flash_attn_varlen_func(
|
575 |
+
query_states,
|
576 |
+
key_states,
|
577 |
+
value_states,
|
578 |
+
cu_seqlens_q=cu_seqlens_q,
|
579 |
+
cu_seqlens_k=cu_seqlens_k,
|
580 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
581 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
582 |
+
dropout_p=dropout,
|
583 |
+
softmax_scale=softmax_scale,
|
584 |
+
causal=causal,
|
585 |
+
)
|
586 |
+
else:
|
587 |
+
attn_output_unpad = flash_attn_varlen_func(
|
588 |
+
query_states,
|
589 |
+
key_states,
|
590 |
+
value_states,
|
591 |
+
cu_seqlens_q=cu_seqlens_q,
|
592 |
+
cu_seqlens_k=cu_seqlens_k,
|
593 |
+
max_seqlen_q=max_seqlen_in_batch_q,
|
594 |
+
max_seqlen_k=max_seqlen_in_batch_k,
|
595 |
+
dropout_p=dropout,
|
596 |
+
softmax_scale=softmax_scale,
|
597 |
+
causal=causal,
|
598 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
599 |
+
)
|
600 |
+
|
601 |
+
attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
|
602 |
+
else:
|
603 |
+
if not use_sliding_windows:
|
604 |
+
attn_output = flash_attn_func(
|
605 |
+
query_states,
|
606 |
+
key_states,
|
607 |
+
value_states,
|
608 |
+
dropout,
|
609 |
+
softmax_scale=softmax_scale,
|
610 |
+
causal=causal,
|
611 |
+
)
|
612 |
+
else:
|
613 |
+
attn_output = flash_attn_func(
|
614 |
+
query_states,
|
615 |
+
key_states,
|
616 |
+
value_states,
|
617 |
+
dropout,
|
618 |
+
softmax_scale=softmax_scale,
|
619 |
+
causal=causal,
|
620 |
+
window_size=(self.config.sliding_window, self.config.sliding_window),
|
621 |
+
)
|
622 |
+
"""
|
623 |
+
Phantom
|
624 |
+
"""
|
625 |
+
if self.turn_on_phantom:
|
626 |
+
half_dim = attn_output.shape[-1] // 2
|
627 |
+
half1_o = attn_output[...,:half_dim]
|
628 |
+
half2_o = attn_output[...,half_dim:]
|
629 |
+
weight1 = self.gating_phantom_1(half1_o)
|
630 |
+
weight2 = self.gating_phantom_2(half2_o)
|
631 |
+
weight_norm = weight1.exp() / (weight1.exp() + weight2.exp())
|
632 |
+
attn_output = weight_norm * half1_o + (1-weight_norm) * half2_o
|
633 |
+
return attn_output
|
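The gating above is exactly a two-way softmax over the per-position gate logits; a toy check (illustrative values):

import torch

w1, w2 = torch.tensor([0.3]), torch.tensor([1.2])
weight_norm = w1.exp() / (w1.exp() + w2.exp())
assert torch.allclose(weight_norm, torch.softmax(torch.stack([w1, w2]), dim=0)[0])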
634 |
+
|
635 |
+
# Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
|
636 |
+
def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
|
637 |
+
batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape
|
638 |
+
|
639 |
+
# On the first iteration we need to properly re-create the padding mask
|
640 |
+
# by slicing it at the proper place
|
641 |
+
if kv_seq_len != attention_mask.shape[-1]:
|
642 |
+
attention_mask_num_tokens = attention_mask.shape[-1]
|
643 |
+
attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]
|
644 |
+
|
645 |
+
indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
|
646 |
+
|
647 |
+
key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
648 |
+
value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
|
649 |
+
|
650 |
+
if query_length == kv_seq_len:
|
651 |
+
query_layer = index_first_axis(
|
652 |
+
query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
|
653 |
+
)
|
654 |
+
cu_seqlens_q = cu_seqlens_k
|
655 |
+
max_seqlen_in_batch_q = max_seqlen_in_batch_k
|
656 |
+
indices_q = indices_k
|
657 |
+
elif query_length == 1:
|
658 |
+
max_seqlen_in_batch_q = 1
|
659 |
+
cu_seqlens_q = torch.arange(
|
660 |
+
batch_size + 1, dtype=torch.int32, device=query_layer.device
|
661 |
+
) # There is a memcpy here, that is very bad.
|
662 |
+
indices_q = cu_seqlens_q[:-1]
|
663 |
+
query_layer = query_layer.squeeze(1)
|
664 |
+
else:
|
665 |
+
# The -q_len: slice assumes left padding.
|
666 |
+
attention_mask = attention_mask[:, -query_length:]
|
667 |
+
query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
|
668 |
+
|
669 |
+
return (
|
670 |
+
query_layer,
|
671 |
+
key_layer,
|
672 |
+
value_layer,
|
673 |
+
indices_q,
|
674 |
+
(cu_seqlens_q, cu_seqlens_k),
|
675 |
+
(max_seqlen_in_batch_q, max_seqlen_in_batch_k),
|
676 |
+
)
|
677 |
+
|
678 |
+
|
679 |
+
# Copied from transformers.models.mixtral.modeling_mixtral.MixtralSdpaAttention with Mixtral->Qwen2
|
680 |
+
class Qwen2SdpaAttention(Qwen2Attention):
|
681 |
+
"""
|
682 |
+
Qwen2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
683 |
+
`Qwen2Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
|
684 |
+
SDPA API.
|
685 |
+
"""
|
686 |
+
|
687 |
+
# Adapted from Qwen2Attention.forward
|
688 |
+
def forward(
|
689 |
+
self,
|
690 |
+
hidden_states: torch.Tensor,
|
691 |
+
attention_mask: Optional[torch.Tensor] = None,
|
692 |
+
position_ids: Optional[torch.LongTensor] = None,
|
693 |
+
past_key_value: Optional[Cache] = None,
|
694 |
+
phantom_position: torch.BoolTensor = None,
|
695 |
+
output_attentions: bool = False,
|
696 |
+
use_cache: bool = False,
|
697 |
+
cache_position: Optional[torch.LongTensor] = None,
|
698 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
699 |
+
if output_attentions:
|
700 |
+
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
|
701 |
+
logger.warning_once(
|
702 |
+
"Qwen2Model is using Qwen2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
703 |
+
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
704 |
+
)
|
705 |
+
return super().forward(
|
706 |
+
hidden_states=hidden_states,
|
707 |
+
attention_mask=attention_mask,
|
708 |
+
position_ids=position_ids,
|
709 |
+
past_key_value=past_key_value,
|
710 |
+
output_attentions=output_attentions,
|
711 |
+
use_cache=use_cache,
|
712 |
+
)
|
713 |
+
|
714 |
+
bsz, q_len, _ = hidden_states.size()
|
715 |
+
|
716 |
+
query_states = self.q_proj(hidden_states)
|
717 |
+
key_states = self.k_proj(hidden_states)
|
718 |
+
value_states = self.v_proj(hidden_states)
|
719 |
+
|
720 |
+
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
721 |
+
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
722 |
+
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
723 |
+
|
724 |
+
kv_seq_len = key_states.shape[-2]
|
725 |
+
if past_key_value is not None:
|
726 |
+
kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
727 |
+
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
728 |
+
|
729 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
|
730 |
+
|
731 |
+
if past_key_value is not None:
|
732 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
|
733 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
734 |
+
|
735 |
+
key_states = repeat_kv(key_states, self.num_key_value_groups)
|
736 |
+
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
737 |
+
|
738 |
+
causal_mask = attention_mask
|
739 |
+
if attention_mask is not None: # no matter the length, we just slice it
|
740 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
741 |
+
|
742 |
+
# SDPA with the memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs and a custom attn_mask.
|
743 |
+
# Reference: https://github.com/pytorch/pytorch/issues/112577.
|
744 |
+
if query_states.device.type == "cuda" and attention_mask is not None:
|
745 |
+
query_states = query_states.contiguous()
|
746 |
+
key_states = key_states.contiguous()
|
747 |
+
value_states = value_states.contiguous()
|
748 |
+
|
749 |
+
"""
|
750 |
+
Phantom
|
751 |
+
"""
|
752 |
+
if self.turn_on_phantom:
|
753 |
+
|
754 |
+
# Phantom: convert from the SDPA layout (bsz, heads, seq, head_dim) to (bsz, seq, heads, head_dim) for the phantom ops
|
755 |
+
query_states = query_states.transpose(1, 2).contiguous()
|
756 |
+
key_states = key_states.transpose(1, 2).contiguous()
|
757 |
+
value_states = value_states.transpose(1, 2).contiguous()
|
758 |
+
|
759 |
+
# [Important] softmax_scale
|
760 |
+
softmax_scale = 1 / math.sqrt(query_states.shape[-1])
|
761 |
+
|
762 |
+
query_states_phantom = []
|
763 |
+
key_states_phantom = []
|
764 |
+
value_states_phantom = []
|
765 |
+
for index, pos in enumerate(phantom_position):
|
766 |
+
if query_states.shape[1] > 1:  # prefill only; decoding steps reuse the phantom states saved during prefill
|
767 |
+
query_states_phantom.append(query_states[index][pos])
|
768 |
+
key_states_phantom.append(key_states[index][pos])
|
769 |
+
value_states_phantom.append(value_states[index][pos])
|
770 |
+
|
771 |
+
# saving phantom qkv for inference
|
772 |
+
self.query_states_phantom = query_states_phantom
|
773 |
+
self.key_states_phantom = key_states_phantom
|
774 |
+
self.value_states_phantom = value_states_phantom
|
775 |
+
|
776 |
+
# phantom qkv: list to tensor
|
777 |
+
query_states_phantom = torch.stack(self.query_states_phantom)
|
778 |
+
key_states_phantom = torch.stack(self.key_states_phantom)
|
779 |
+
value_states_phantom = torch.stack(self.value_states_phantom)
|
780 |
+
|
781 |
+
# phantom qkv: expand the phantom-token states from a few positions to the full sequence (1 -> N)
|
782 |
+
query_states_phantom = self.xattn_query_phantom(q=query_states, k=query_states_phantom, v=query_states_phantom)
|
783 |
+
key_states_phantom = self.xattn_key_phantom(q=key_states, k=key_states_phantom, v=key_states_phantom)
|
784 |
+
value_states_phantom = self.xattn_value_phantom(q=value_states, k=value_states_phantom, v=value_states_phantom, is_residual=True)
|
785 |
+
|
786 |
+
# concat original qkv and phantom qkv along the head (hidden) dimension
|
787 |
+
query_states = torch.cat([query_states, query_states_phantom], dim=3)
|
788 |
+
key_states = torch.cat([key_states, key_states_phantom], dim=3)
|
789 |
+
value_states = torch.cat([value_states, value_states_phantom], dim=3)
|
790 |
+
|
791 |
+
# Phantom: convert back to the SDPA layout (bsz, heads, seq, head_dim)
|
792 |
+
query_states = query_states.transpose(1, 2).contiguous()
|
793 |
+
key_states = key_states.transpose(1, 2).contiguous()
|
794 |
+
value_states = value_states.transpose(1, 2).contiguous()
|
795 |
+
|
796 |
+
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
|
797 |
+
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
|
798 |
+
# The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
|
799 |
+
is_causal = True if causal_mask is None and q_len > 1 else False
|
800 |
+
|
801 |
+
attn_output = torch.nn.functional.scaled_dot_product_attention(
|
802 |
+
query_states,
|
803 |
+
key_states,
|
804 |
+
value_states,
|
805 |
+
attn_mask=causal_mask,
|
806 |
+
dropout_p=self.attention_dropout if self.training else 0.0,
|
807 |
+
is_causal=is_causal,
|
808 |
+
scale=softmax_scale
|
809 |
+
)
|
810 |
+
|
811 |
+
"""
|
812 |
+
Phantom
|
813 |
+
"""
|
814 |
+
if self.turn_on_phantom:
|
815 |
+
attn_output = attn_output.transpose(1, 2).contiguous() # for sdpa dimension
|
816 |
+
half_dim = attn_output.shape[-1] // 2
|
817 |
+
half1_o = attn_output[...,:half_dim]
|
818 |
+
half2_o = attn_output[...,half_dim:]
|
819 |
+
weight1 = self.gating_phantom_1(half1_o)
|
820 |
+
weight2 = self.gating_phantom_2(half2_o)
|
821 |
+
weight_norm = weight1.exp() / (weight1.exp() + weight2.exp())
|
822 |
+
attn_output = weight_norm * half1_o + (1-weight_norm) * half2_o
|
823 |
+
attn_output = attn_output.transpose(1, 2).contiguous() # for sdpa dimension
|
824 |
+
|
825 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
826 |
+
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
|
827 |
+
|
828 |
+
attn_output = self.o_proj(attn_output)
|
829 |
+
|
830 |
+
return attn_output, None, past_key_value
|
831 |
+
|
832 |
+
|
833 |
+
QWEN2_ATTENTION_CLASSES = {
|
834 |
+
"eager": Qwen2Attention,
|
835 |
+
"flash_attention_2": Qwen2FlashAttention2,
|
836 |
+
"sdpa": Qwen2SdpaAttention,
|
837 |
+
}
|
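The decoder layer below selects the backend from `config._attn_implementation`. A minimal sketch of the dispatch, assuming a stock transformers install:

from transformers.models.qwen2.configuration_qwen2 import Qwen2Config

config = Qwen2Config(attn_implementation="sdpa")   # or "eager" / "flash_attention_2"
attn_cls = QWEN2_ATTENTION_CLASSES[config._attn_implementation]
print(attn_cls.__name__)                           # Qwen2SdpaAttention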
838 |
+
|
839 |
+
|
840 |
+
class Qwen2DecoderLayer(nn.Module):
|
841 |
+
def __init__(self, config: Qwen2Config, layer_idx: int):
|
842 |
+
super().__init__()
|
843 |
+
self.hidden_size = config.hidden_size
|
844 |
+
|
845 |
+
if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
|
846 |
+
logger.warning_once(
|
847 |
+
f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
|
848 |
+
"unexpected results may be encountered."
|
849 |
+
)
|
850 |
+
self.self_attn = QWEN2_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
|
851 |
+
|
852 |
+
self.mlp = Qwen2MLP(config)
|
853 |
+
self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
854 |
+
self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
855 |
+
|
856 |
+
def forward(
|
857 |
+
self,
|
858 |
+
hidden_states: torch.Tensor,
|
859 |
+
attention_mask: Optional[torch.Tensor] = None,
|
860 |
+
position_ids: Optional[torch.LongTensor] = None,
|
861 |
+
past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
862 |
+
phantom_position: torch.BoolTensor = None,
|
863 |
+
output_attentions: Optional[bool] = False,
|
864 |
+
use_cache: Optional[bool] = False,
|
865 |
+
cache_position: Optional[torch.LongTensor] = None,
|
866 |
+
**kwargs,
|
867 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
868 |
+
"""
|
869 |
+
Args:
|
870 |
+
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
871 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
872 |
+
`(batch, sequence_length)` where padding elements are indicated by 0.
|
873 |
+
output_attentions (`bool`, *optional*):
|
874 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
875 |
+
returned tensors for more detail.
|
876 |
+
use_cache (`bool`, *optional*):
|
877 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
878 |
+
(see `past_key_values`).
|
879 |
+
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
880 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
881 |
+
Indices depicting the position of the input sequence tokens in the sequence.
|
882 |
+
kwargs (`dict`, *optional*):
|
883 |
+
Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
|
884 |
+
into the model
|
885 |
+
"""
|
886 |
+
|
887 |
+
residual = hidden_states
|
888 |
+
|
889 |
+
hidden_states = self.input_layernorm(hidden_states)
|
890 |
+
|
891 |
+
# Self Attention
|
892 |
+
hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
893 |
+
hidden_states=hidden_states,
|
894 |
+
attention_mask=attention_mask,
|
895 |
+
position_ids=position_ids,
|
896 |
+
past_key_value=past_key_value,
|
897 |
+
phantom_position=phantom_position,
|
898 |
+
output_attentions=output_attentions,
|
899 |
+
use_cache=use_cache,
|
900 |
+
cache_position=cache_position,
|
901 |
+
)
|
902 |
+
hidden_states = residual + hidden_states
|
903 |
+
|
904 |
+
# Fully Connected
|
905 |
+
residual = hidden_states
|
906 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
907 |
+
hidden_states = self.mlp(hidden_states)
|
908 |
+
hidden_states = residual + hidden_states
|
909 |
+
|
910 |
+
outputs = (hidden_states,)
|
911 |
+
|
912 |
+
if output_attentions:
|
913 |
+
outputs += (self_attn_weights,)
|
914 |
+
|
915 |
+
if use_cache:
|
916 |
+
outputs += (present_key_value,)
|
917 |
+
|
918 |
+
return outputs
|
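A toy end-to-end sketch of one decoder layer with the SDPA backend and a boolean `phantom_position` mask; all sizes are invented, and it requires `XAttention` from this repo's `utils/utils.py` to be importable:

import torch
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config

config = Qwen2Config(
    hidden_size=32, intermediate_size=64, num_attention_heads=4,
    num_key_value_heads=2, attn_implementation="sdpa",
)
layer = Qwen2DecoderLayer(config, layer_idx=0)
hidden = torch.randn(1, 6, 32)
phantom_position = torch.zeros(1, 6, dtype=torch.bool)
phantom_position[0, 0] = True                      # mark the phantom token slot
position_ids = torch.arange(6).unsqueeze(0)
out = layer(hidden, position_ids=position_ids, phantom_position=phantom_position)
print(out[0].shape)                                # torch.Size([1, 6, 32])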
919 |
+
|
920 |
+
|
921 |
+
QWEN2_START_DOCSTRING = r"""
|
922 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
923 |
+
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
|
924 |
+
etc.)
|
925 |
+
|
926 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
927 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
928 |
+
and behavior.
|
929 |
+
|
930 |
+
Parameters:
|
931 |
+
config ([`Qwen2Config`]):
|
932 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
933 |
+
load the weights associated with the model, only the configuration. Check out the
|
934 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
935 |
+
"""
|
936 |
+
|
937 |
+
|
938 |
+
@add_start_docstrings(
|
939 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
940 |
+
QWEN2_START_DOCSTRING,
|
941 |
+
)
|
942 |
+
class Qwen2PreTrainedModel(PreTrainedModel):
|
943 |
+
config_class = Qwen2Config
|
944 |
+
base_model_prefix = "model"
|
945 |
+
supports_gradient_checkpointing = True
|
946 |
+
_no_split_modules = ["Qwen2DecoderLayer"]
|
947 |
+
_skip_keys_device_placement = "past_key_values"
|
948 |
+
_supports_flash_attn_2 = True
|
949 |
+
_supports_sdpa = True
|
950 |
+
_supports_cache_class = True
|
951 |
+
|
952 |
+
def _init_weights(self, module):
|
953 |
+
std = self.config.initializer_range
|
954 |
+
if isinstance(module, nn.Linear):
|
955 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
956 |
+
if module.bias is not None:
|
957 |
+
module.bias.data.zero_()
|
958 |
+
elif isinstance(module, nn.Embedding):
|
959 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
960 |
+
if module.padding_idx is not None:
|
961 |
+
module.weight.data[module.padding_idx].zero_()
|
962 |
+
|
963 |
+
|
964 |
+
QWEN2_INPUTS_DOCSTRING = r"""
|
965 |
+
Args:
|
966 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
967 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
968 |
+
it.
|
969 |
+
|
970 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
971 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
972 |
+
|
973 |
+
[What are input IDs?](../glossary#input-ids)
|
974 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
975 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
976 |
+
|
977 |
+
- 1 for tokens that are **not masked**,
|
978 |
+
- 0 for tokens that are **masked**.
|
979 |
+
|
980 |
+
[What are attention masks?](../glossary#attention-mask)
|
981 |
+
|
982 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
983 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
984 |
+
|
985 |
+
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
986 |
+
`past_key_values`).
|
987 |
+
|
988 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
989 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
990 |
+
information on the default strategy.
|
991 |
+
|
992 |
+
- 1 indicates the head is **not masked**,
|
993 |
+
- 0 indicates the head is **masked**.
|
994 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
995 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
996 |
+
config.n_positions - 1]`.
|
997 |
+
|
998 |
+
[What are position IDs?](../glossary#position-ids)
|
999 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
1000 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
1001 |
+
blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
|
1002 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
1003 |
+
|
1004 |
+
Two formats are allowed:
|
1005 |
+
- a [`~cache_utils.Cache`] instance;
|
1006 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
1007 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
|
1008 |
+
cache format.
|
1009 |
+
|
1010 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
1011 |
+
legacy cache format will be returned.
|
1012 |
+
|
1013 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
1014 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
1015 |
+
of shape `(batch_size, sequence_length)`.
|
1016 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
1017 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
1018 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
1019 |
+
model's internal embedding lookup matrix.
|
1020 |
+
use_cache (`bool`, *optional*):
|
1021 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
1022 |
+
`past_key_values`).
|
1023 |
+
output_attentions (`bool`, *optional*):
|
1024 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
1025 |
+
tensors for more detail.
|
1026 |
+
output_hidden_states (`bool`, *optional*):
|
1027 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
1028 |
+
more detail.
|
1029 |
+
return_dict (`bool`, *optional*):
|
1030 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
1031 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
1032 |
+
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
|
1033 |
+
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
|
1034 |
+
the complete sequence length.
|
1035 |
+
"""
|
1036 |
+
|
1037 |
+
|
1038 |
+
@add_start_docstrings(
|
1039 |
+
"The bare Qwen2 Model outputting raw hidden-states without any specific head on top.",
|
1040 |
+
QWEN2_START_DOCSTRING,
|
1041 |
+
)
|
1042 |
+
class Qwen2Model(Qwen2PreTrainedModel):
|
1043 |
+
"""
|
1044 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
|
1045 |
+
|
1046 |
+
Args:
|
1047 |
+
config: Qwen2Config
|
1048 |
+
"""
|
1049 |
+
|
1050 |
+
def __init__(self, config: Qwen2Config):
|
1051 |
+
super().__init__(config)
|
1052 |
+
self.padding_idx = config.pad_token_id
|
1053 |
+
self.vocab_size = config.vocab_size
|
1054 |
+
|
1055 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
1056 |
+
self.layers = nn.ModuleList(
|
1057 |
+
[Qwen2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
1058 |
+
)
|
1059 |
+
self._attn_implementation = config._attn_implementation
|
1060 |
+
self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
1061 |
+
|
1062 |
+
self.gradient_checkpointing = False
|
1063 |
+
# Initialize weights and apply final processing
|
1064 |
+
self.post_init()
|
1065 |
+
|
1066 |
+
def get_input_embeddings(self):
|
1067 |
+
return self.embed_tokens
|
1068 |
+
|
1069 |
+
def set_input_embeddings(self, value):
|
1070 |
+
self.embed_tokens = value
|
1071 |
+
|
1072 |
+
@add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
|
1073 |
+
def forward(
|
1074 |
+
self,
|
1075 |
+
input_ids: torch.LongTensor = None,
|
1076 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1077 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1078 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1079 |
+
phantom_position: torch.BoolTensor = None,
|
1080 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1081 |
+
use_cache: Optional[bool] = None,
|
1082 |
+
output_attentions: Optional[bool] = None,
|
1083 |
+
output_hidden_states: Optional[bool] = None,
|
1084 |
+
return_dict: Optional[bool] = None,
|
1085 |
+
cache_position: Optional[torch.LongTensor] = None,
|
1086 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
1087 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1088 |
+
output_hidden_states = (
|
1089 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1090 |
+
)
|
1091 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
1092 |
+
|
1093 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1094 |
+
|
1095 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
1096 |
+
raise ValueError(
|
1097 |
+
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
1098 |
+
)
|
1099 |
+
|
1100 |
+
if self.gradient_checkpointing and self.training:
|
1101 |
+
if use_cache:
|
1102 |
+
logger.warning_once(
|
1103 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
1104 |
+
)
|
1105 |
+
use_cache = False
|
1106 |
+
|
1107 |
+
use_legacy_cache = False
|
1108 |
+
if use_cache and not isinstance(past_key_values, Cache):
|
1109 |
+
use_legacy_cache = True
|
1110 |
+
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
1111 |
+
logger.warning_once(
|
1112 |
+
"We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
|
1113 |
+
"Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
|
1114 |
+
)
|
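For context, a minimal sketch of the legacy-to-Cache conversion performed above (toy tensors):

import torch
from transformers.cache_utils import DynamicCache

legacy = ((torch.randn(1, 2, 4, 8), torch.randn(1, 2, 4, 8)),)   # one layer: (k, v)
cache = DynamicCache.from_legacy_cache(legacy)
print(cache.get_seq_length())                      # 4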
1115 |
+
|
1116 |
+
if inputs_embeds is None:
|
1117 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
1118 |
+
|
1119 |
+
if cache_position is None:
|
1120 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
1121 |
+
cache_position = torch.arange(
|
1122 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
1123 |
+
)
|
1124 |
+
if position_ids is None:
|
1125 |
+
position_ids = cache_position.unsqueeze(0)
|
1126 |
+
|
1127 |
+
causal_mask = self._update_causal_mask(
|
1128 |
+
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
|
1129 |
+
)
|
1130 |
+
|
1131 |
+
hidden_states = inputs_embeds
|
1132 |
+
|
1133 |
+
# decoder layers
|
1134 |
+
all_hidden_states = () if output_hidden_states else None
|
1135 |
+
all_self_attns = () if output_attentions else None
|
1136 |
+
next_decoder_cache = None
|
1137 |
+
|
1138 |
+
for decoder_layer in self.layers:
|
1139 |
+
if output_hidden_states:
|
1140 |
+
all_hidden_states += (hidden_states,)
|
1141 |
+
|
1142 |
+
if self.gradient_checkpointing and self.training:
|
1143 |
+
layer_outputs = self._gradient_checkpointing_func(
|
1144 |
+
decoder_layer.__call__,
|
1145 |
+
hidden_states,
|
1146 |
+
causal_mask,
|
1147 |
+
position_ids,
|
1148 |
+
past_key_values,
|
1149 |
+
phantom_position,
|
1150 |
+
output_attentions,
|
1151 |
+
use_cache,
|
1152 |
+
cache_position,
|
1153 |
+
)
|
1154 |
+
else:
|
1155 |
+
layer_outputs = decoder_layer(
|
1156 |
+
hidden_states,
|
1157 |
+
attention_mask=causal_mask,
|
1158 |
+
position_ids=position_ids,
|
1159 |
+
past_key_value=past_key_values,
|
1160 |
+
phantom_position=phantom_position,
|
1161 |
+
output_attentions=output_attentions,
|
1162 |
+
use_cache=use_cache,
|
1163 |
+
cache_position=cache_position,
|
1164 |
+
)
|
1165 |
+
|
1166 |
+
hidden_states = layer_outputs[0]
|
1167 |
+
|
1168 |
+
if use_cache:
|
1169 |
+
next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
1170 |
+
|
1171 |
+
if output_attentions:
|
1172 |
+
all_self_attns += (layer_outputs[1],)
|
1173 |
+
|
1174 |
+
hidden_states = self.norm(hidden_states)
|
1175 |
+
|
1176 |
+
# add hidden states from the last decoder layer
|
1177 |
+
if output_hidden_states:
|
1178 |
+
all_hidden_states += (hidden_states,)
|
1179 |
+
|
1180 |
+
next_cache = None
|
1181 |
+
if use_cache:
|
1182 |
+
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
|
1183 |
+
|
1184 |
+
if not return_dict:
|
1185 |
+
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
1186 |
+
return BaseModelOutputWithPast(
|
1187 |
+
last_hidden_state=hidden_states,
|
1188 |
+
past_key_values=next_cache,
|
1189 |
+
hidden_states=all_hidden_states,
|
1190 |
+
attentions=all_self_attns,
|
1191 |
+
)
|
1192 |
+
|
1193 |
+
    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
        # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114

        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        if attention_mask is not None and attention_mask.dim() == 4:
            # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
            if attention_mask.max() != 0:
                raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0")
            causal_mask = attention_mask
        else:
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

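To make the mask construction above concrete, here is a self-contained sketch with toy sizes (3 new tokens appended to a cache holding 2): disallowed slots are filled with the dtype minimum and allowed slots stay at 0, so the "inverted" mask can be added straight to the attention logits:

```python
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
sequence_length, target_length = 3, 5                   # 3 new tokens, 5 total slots
cache_position = torch.arange(2, 2 + sequence_length)   # new tokens sit at slots 2..4

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)       # zero (allow) the lower triangle
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)

# Row i may attend exactly to slots <= cache_position[i]:
print(causal_mask == 0)
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True, False],
#         [ True,  True,  True,  True,  True]])
```
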
class Qwen2ForCausalLM(Qwen2PreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = Qwen2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Qwen2ForCausalLM

        >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            phantom_position=phantom_position,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        phantom_position=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        use_cache=True,
        **kwargs,
    ):
        past_length = 0
        # Omit tokens covered by past_key_values
        if past_key_values is not None:
            # Past key values are always initialized with a `Cache` object -> no need for if-else anymore
            past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
            max_cache_length = (
                torch.tensor(past_key_values.get_max_length(), device=input_ids.device)
                if past_key_values.get_max_length() is not None
                else None
            )
            cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length)

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_length == 0:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1]
        if cache_position is None:
            cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device)
        elif use_cache:
            cache_position = cache_position[-input_length:]

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "phantom_position": phantom_position,
                "use_cache": use_cache,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

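A hypothetical usage sketch for the class above. `phantom_position` is assumed here to be one boolean flag per input token marking where the phantom tokens sit; the exact shape contract comes from the Phantom attention code rather than this excerpt, and the tiny `Qwen2Config` values are illustrative only:

```python
import torch
from transformers import Qwen2Config

# Toy-sized config; real checkpoints define these values themselves.
config = Qwen2Config(
    vocab_size=128, hidden_size=32, intermediate_size=64,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=4,
)
model = Qwen2ForCausalLM(config)  # the class defined above

input_ids = torch.randint(0, 128, (1, 6))
phantom_position = torch.zeros(1, 6, dtype=torch.bool)
phantom_position[0, 2] = True  # mark where the phantom token sits (assumed semantics)

out = model(input_ids=input_ids, phantom_position=phantom_position)
print(out.logits.shape)  # torch.Size([1, 6, 128])
```
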
@add_start_docstrings(
    """
    The Qwen2 Model transformer with a sequence classification head on top (linear layer).

    [`Qwen2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    QWEN2_START_DOCSTRING,
)
class Qwen2ForSequenceClassification(Qwen2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Qwen2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

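The pooling above relies on an ONNX-friendly trick to locate the last non-pad token: argmax over the pad mask finds the first pad, minus one gives the last real token, and the modulo handles rows with no padding at all. A standalone sketch with made-up ids:

```python
import torch

pad_token_id = 0
input_ids = torch.tensor([
    [5, 7, 9, 0, 0],   # padded row: last real token at index 2
    [3, 4, 6, 8, 2],   # unpadded row: argmax of all-zero mask is 0 -> -1 -> wraps to 4
])
sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])
```
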
@add_start_docstrings(
    """
    The Qwen2 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
    output) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    QWEN2_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForTokenClassification with Llama->Qwen2, LLAMA->QWEN2
class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Qwen2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(QWEN2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
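The token-classification loss above flattens logits and labels; a short sketch (toy tensors) showing that positions labelled -100 are ignored by `CrossEntropyLoss` by default:

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 3
logits = torch.randn(2, 4, num_labels)       # (batch, seq, num_labels)
labels = torch.tensor([[0, 2, -100, 1],
                       [1, 1, 0, -100]])     # -100 marks padded positions
loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
print(loss.item())                           # averaged over the 6 non-ignored tokens
```
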
model/arch_1_8b/configuration_intern_vit.py
ADDED
@@ -0,0 +1,114 @@
import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_300m'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
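A quick instantiation sketch for the config above (toy values; the real checkpoints ship their own). The `vision_config` extraction in `from_pretrained` lets the same class load from either a standalone vision checkpoint or a full Phantom checkpoint:

```python
config = InternVisionConfig(image_size=448, patch_size=14, hidden_size=1024,
                            num_attention_heads=16, num_hidden_layers=24)
print(config.model_type)                               # 'intern_vit_300m'
print((config.image_size // config.patch_size) ** 2)   # 1024 patches per image
```
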
model/arch_1_8b/configuration_internlm2.py
ADDED
@@ -0,0 +1,150 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" InternLM2 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


# Modified from transformers.model.llama.configuration_llama.LlamaConfig
class InternLM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed when calling [`InternLM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
    Example:

    """
    model_type = 'internlm2'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.bias = bias

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        self.attn_implementation = attn_implementation
        if self.attn_implementation is None:
            self.attn_implementation = 'eager'
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
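A sketch of the `rope_scaling` contract that `_rope_scaling_validation` enforces: either `None`, or a two-field dict with `type` in `['linear', 'dynamic']` and a float `factor >= 1.0`:

```python
# Valid: a two-field dict with an accepted type and a float factor >= 1.0.
ok = InternLM2Config(rope_scaling={'type': 'dynamic', 'factor': 2.0})

# Invalid type -> ValueError from _rope_scaling_validation.
try:
    InternLM2Config(rope_scaling={'type': 'ntk', 'factor': 2.0})
except ValueError as e:
    print(e)  # type field must be one of ['linear', 'dynamic']
```
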
model/arch_1_8b/configuration_phantom.py
ADDED
@@ -0,0 +1,82 @@
import copy

from transformers import LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig
from .configuration_internlm2 import InternLM2Config

logger = logging.get_logger(__name__)


class PhantomConfig(PretrainedConfig):
    model_type = 'phantom'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        dynamic_image_size=False,
        use_thumbnail=False,
        min_dynamic_patch=1,
        max_dynamic_patch=6,
        **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the llm_config with default values (`LlamaConfig`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
            self.llm_config = InternLM2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
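A minimal composition sketch for `PhantomConfig`, assuming the classes above are importable and using illustrative values. Note that `llm_config` must carry an `architectures` list, since that is how the class picks between `LlamaConfig` and `InternLM2Config`:

```python
llm_config = {
    'architectures': ['InternLM2ForCausalLM'],  # required: selects the backbone config class
    'hidden_size': 2048,
    'num_hidden_layers': 24,
    'num_attention_heads': 16,
}
config = PhantomConfig(vision_config={'hidden_size': 1024},
                       llm_config=llm_config,
                       dynamic_image_size=True)
print(type(config.llm_config).__name__)  # InternLM2Config
```
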
model/arch_1_8b/modeling_intern_vit.py
ADDED
@@ -0,0 +1,430 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    try:  # v1
        from flash_attn.flash_attn_interface import \
            flash_attn_unpadded_qkvpacked_func
    except:  # v2
        from flash_attn.flash_attn_interface import \
            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

    from flash_attn.bert_padding import pad_input, unpad_input

    has_flash_attn = True
except:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                 if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
model/arch_1_8b/modeling_internlm2.py
ADDED
@@ -0,0 +1,1488 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/modeling_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch InternLM2 model."""
import math
import queue
import threading
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast,
                                           SequenceClassifierOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (add_start_docstrings,
                                add_start_docstrings_to_model_forward, logging,
                                replace_return_docstrings)

try:
    from transformers.generation.streamers import BaseStreamer
except:  # noqa # pylint: disable=bare-except
    BaseStreamer = None

from .configuration_internlm2 import InternLM2Config

# Phantom
from utils.utils import *

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = 'InternLM2Config'

flash_attn_func, flash_attn_varlen_func = None, None
pad_input, index_first_axis, unpad_input = None, None, None
try:
    from flash_attn import flash_attn_func as _flash_attn_func
    from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis as _index_first_axis
    from flash_attn.bert_padding import pad_input as _pad_input
    from flash_attn.bert_padding import unpad_input as _unpad_input

    flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
    pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
    has_flash_attn = True
except:
    has_flash_attn = False


def _import_flash_attn():
    global flash_attn_func, flash_attn_varlen_func
    global pad_input, index_first_axis, unpad_input
    try:
        from flash_attn import flash_attn_func as _flash_attn_func
        from flash_attn import \
            flash_attn_varlen_func as _flash_attn_varlen_func
        from flash_attn.bert_padding import \
            index_first_axis as _index_first_axis
        from flash_attn.bert_padding import pad_input as _pad_input
        from flash_attn.bert_padding import unpad_input as _unpad_input
        flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
        pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
    except ImportError:
        raise ImportError('flash_attn is not installed.')


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
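# Example: for attention_mask [[1, 1, 0], [1, 1, 1]] this returns
# indices = [0, 1, 3, 4, 5], cu_seqlens = [0, 2, 5] (cumulative sequence lengths
# with a leading zero), and max_seqlen_in_batch = 3 -- the metadata that
# flash_attn_varlen_func consumes for variable-length batches.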

# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    bsz, src_len = mask.size()
    tgt_len = tgt_len if tgt_len is not None else src_len

    expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)

    inverted_mask = 1.0 - expanded_mask

    return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
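# Example: _make_causal_mask with tgt_len = 3 produces
#   [[0, -inf, -inf],
#    [0,    0, -inf],
#    [0,    0,    0]]
# (where -inf stands for torch.finfo(dtype).min), broadcast to shape
# [bsz, 1, tgt_len, tgt_len + past_key_values_length] and added to the logits.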

# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
class InternLM2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        InternLM2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
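    # In math terms: y = w * x / sqrt(mean(x^2) + eps). Unlike LayerNorm there is
    # no mean subtraction and no bias; statistics are computed in fp32 and the
    # result is cast back to the input dtype.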

# Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
class InternLM2RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer('inv_freq', inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )
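    # The caches have shape [seq_len, dim]: inv_freq holds the per-pair angular
    # frequencies 1 / base^(2i/dim), and cos_cached/sin_cached are rebuilt lazily
    # whenever a sequence longer than max_seq_len_cached is seen.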

# Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)


# Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
    Credits to the Reddit users /u/bloc97 and /u/emozilla.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer('inv_freq', inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
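    # Dynamic NTK rescales the RoPE base only when seq_len exceeds the training window:
    #   base' = base * (scaling_factor * seq_len / max_position_embeddings
    #                   - (scaling_factor - 1)) ** (dim / (dim - 2)),
    # stretching the low-frequency components so long contexts stay in range
    # while shorter sequences are left untouched.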

# Copied from transformers.model.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors."""
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
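# For each coordinate pair (x1, x2) at position m with frequency theta (pairing
# index i with i + dim/2), this realizes the 2-D rotation
#   (x1*cos(m*theta) - x2*sin(m*theta), x2*cos(m*theta) + x1*sin(m*theta)),
# so the q.k^T dot product depends only on the relative offset between positions.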

class InternLM2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))

        return down_proj
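    # This is the gated (SwiGLU-style) MLP, w2(act(w1(x)) * w3(x)), with
    # hidden_act typically 'silu', in place of the classic two-layer FFN.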

# Copied from transformers.model.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
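# Used for grouped-query attention: e.g. with 32 query heads and 8 KV heads,
# n_rep = 4 and each KV head is shared by 4 consecutive query heads.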

# Modified from transformers.model.llama.modeling_llama.LlamaAttention
class InternLM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        self.wqkv = nn.Linear(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
        )

        self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self._init_rope()

        """
        Phantom
        """
        # Phantom Init
        self.turn_on_phantom = True
        self.xattn_query_phantom = XAttention(self.head_dim)
        self.xattn_key_phantom = XAttention(self.head_dim)
        self.xattn_value_phantom = XAttention(self.head_dim)
        self.gating_phantom_1 = nn.Linear(self.head_dim, 1)
        self.gating_phantom_2 = nn.Linear(self.head_dim, 1)
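        # XAttention comes in via `from utils.utils import *` (defined in
        # utils/utils.py, not shown in this diff); each instance is a small
        # cross-attention block over head_dim-sized states. The three modules let
        # every token's query/key/value attend to the q/k/v gathered at the
        # phantom-token positions, and the two gating heads later blend the
        # original and phantom-augmented halves of the attention output.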

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = InternLM2RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            scaling_factor = self.config.rope_scaling['factor']
            if scaling_type == 'dynamic':
                self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            elif scaling_type == 'linear':
                self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            else:
                raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
        return self.rotary_emb

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


# Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
class InternLM2FlashAttention2(InternLM2Attention):
    """
    InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # InternLM2FlashAttention2 attention does not support output_attentions
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, phantom_position
        )
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, phantom_position, dropout=0.0, softmax_scale=None,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """

        """
        Phantom
        """
        if self.turn_on_phantom:

            # [Important] softmax_scale
            softmax_scale = 1 / math.sqrt(query_states.shape[-1])

            query_states_phantom = []
            key_states_phantom = []
            value_states_phantom = []
            for index, pos in enumerate(phantom_position):
                if query_states.shape[1] > 1:
                    query_states_phantom.append(query_states[index][pos])
                    key_states_phantom.append(key_states[index][pos])
                    value_states_phantom.append(value_states[index][pos])

            # saving phantom qkv for inference
            self.query_states_phantom = query_states_phantom
            self.key_states_phantom = key_states_phantom
            self.value_states_phantom = value_states_phantom

            # phantom qkv: list to tensor
            query_states_phantom = torch.stack(self.query_states_phantom)
            key_states_phantom = torch.stack(self.key_states_phantom)
            value_states_phantom = torch.stack(self.value_states_phantom)

            # phantom qkv: 1 -> N (sequence)
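            # Each XAttention call lets every sequence position attend over the
            # small set of phantom-position states: the running q/k/v act as
            # queries and the stacked phantom states as keys/values, producing a
            # phantom-conditioned tensor of the same shape; the value path also
            # carries a residual connection (is_residual=True).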
            query_states_phantom = self.xattn_query_phantom(q=query_states, k=query_states_phantom, v=query_states_phantom)
            key_states_phantom = self.xattn_key_phantom(q=key_states, k=key_states_phantom, v=key_states_phantom)
            value_states_phantom = self.xattn_value_phantom(q=value_states, k=value_states_phantom, v=value_states_phantom, is_residual=True)

            # concat original qkv and phantom qkv for hidden-dimension / heads
            query_states = torch.cat([query_states, query_states_phantom], dim=3)
            key_states = torch.cat([key_states, key_states_phantom], dim=3)
            value_states = torch.cat([value_states, value_states_phantom], dim=3)

        # Contains at least one padding token in the sequence
        causal = self.is_causal and query_length != 1
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        """
        Phantom
        """
        if self.turn_on_phantom:
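            # The first half of the last dim carries the original value channels
            # and the second half the phantom-augmented ones; weight_norm below is
            # a two-way softmax over the two scalar gates, so the result is a
            # per-position convex combination of the two halves.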
            half_dim = attn_output.shape[-1] // 2
            half1_o = attn_output[..., :half_dim]
            half2_o = attn_output[..., half_dim:]
            weight1 = self.gating_phantom_1(half1_o)
            weight2 = self.gating_phantom_2(half2_o)
            weight_norm = weight1.exp() / (weight1.exp() + weight2.exp())
            attn_output = weight_norm * half1_o + (1 - weight_norm) * half2_o
        return attn_output

    def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q.to(torch.int64),
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


INTERNLM2_ATTENTION_CLASSES = {
    'eager': InternLM2Attention,
    'flash_attention_2': InternLM2FlashAttention2,
}
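# config.attn_implementation selects between these two classes when the decoder
# layers are built; the model falls back to 'eager' when flash_attn is not
# importable (see InternLM2Model.__init__ below).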

# Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
class InternLM2DecoderLayer(nn.Module):
    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)

        self.feed_forward = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

        residual = hidden_states

        hidden_states = self.attention_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            phantom_position=phantom_position,
            output_attentions=output_attentions,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.ffn_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
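    # Standard pre-norm transformer block: RMSNorm -> attention -> residual add,
    # then RMSNorm -> gated MLP -> residual add; phantom_position is threaded
    # through to the attention module untouched.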

InternLM2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`InternLM2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2PreTrainedModel(PreTrainedModel):
    config_class = InternLM2Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['InternLM2DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


InternLM2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Modified from transformers.model.llama.modeling_llama.LlamaModel
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2Model(InternLM2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]

    Args:
        config: InternLM2Config
    """

    _auto_class = 'AutoModel'

    def __init__(self, config: InternLM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.config = config
        if not has_flash_attn:
            self.config.attn_implementation = 'eager'
            print('Warning: Flash attention is not available, using eager attention instead.')

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

        self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.tok_embeddings

    def set_input_embeddings(self, value):
        self.tok_embeddings = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.attn_implementation == 'flash_attention_2':
            _import_flash_attn()

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)

        if self.config.attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(
                    (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
                )
            attention_mask = self._prepare_decoder_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                    phantom_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    phantom_position=phantom_position,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


# Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
class InternLM2ForCausalLM(InternLM2PreTrainedModel):
    _auto_class = 'AutoModelForCausalLM'

    _tied_weights_keys = ['output.weight']

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM2Model(config)
        self.vocab_size = config.vocab_size
        self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
1110 |
+
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
1111 |
+
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
1112 |
+
|
1113 |
+
Returns:
|
1114 |
+
|
1115 |
+
Example:
|
1116 |
+
|
1117 |
+
```python
|
1118 |
+
>>> from transformers import AutoTokenizer, InternLM2ForCausalLM
|
1119 |
+
|
1120 |
+
>>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
|
1121 |
+
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
|
1122 |
+
|
1123 |
+
>>> prompt = "Hey, are you conscious? Can you talk to me?"
|
1124 |
+
>>> inputs = tokenizer(prompt, return_tensors="pt")
|
1125 |
+
|
1126 |
+
>>> # Generate
|
1127 |
+
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
1128 |
+
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
1129 |
+
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
1130 |
+
```"""
|
1131 |
+
|
1132 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
1133 |
+
output_hidden_states = (
|
1134 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
1135 |
+
)
|
1136 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1137 |
+
|
1138 |
+
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
|
1139 |
+
outputs = self.model(
|
1140 |
+
input_ids=input_ids,
|
1141 |
+
attention_mask=attention_mask,
|
1142 |
+
position_ids=position_ids,
|
1143 |
+
past_key_values=past_key_values,
|
1144 |
+
phantom_position=phantom_position,
|
1145 |
+
inputs_embeds=inputs_embeds,
|
1146 |
+
use_cache=use_cache,
|
1147 |
+
output_attentions=output_attentions,
|
1148 |
+
output_hidden_states=output_hidden_states,
|
1149 |
+
return_dict=return_dict,
|
1150 |
+
)
|
1151 |
+
|
1152 |
+
hidden_states = outputs[0]
|
1153 |
+
logits = self.output(hidden_states)
|
1154 |
+
logits = logits.float()
|
1155 |
+
|
1156 |
+
loss = None
|
1157 |
+
if labels is not None:
|
1158 |
+
# Shift so that tokens < n predict n
|
1159 |
+
shift_logits = logits[..., :-1, :].contiguous()
|
1160 |
+
shift_labels = labels[..., 1:].contiguous()
|
1161 |
+
# Flatten the tokens
|
1162 |
+
loss_fct = CrossEntropyLoss()
|
1163 |
+
shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
1164 |
+
shift_labels = shift_labels.view(-1)
|
1165 |
+
# Enable model parallelism
|
1166 |
+
shift_labels = shift_labels.to(shift_logits.device)
|
1167 |
+
loss = loss_fct(shift_logits, shift_labels)
|
1168 |
+
|
1169 |
+
if not return_dict:
|
1170 |
+
output = (logits,) + outputs[1:]
|
1171 |
+
return (loss,) + output if loss is not None else output
|
1172 |
+
|
1173 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
1174 |
+
output = CausalLMOutputWithPast(
|
1175 |
+
loss=loss,
|
1176 |
+
logits=logits,
|
1177 |
+
past_key_values=outputs.past_key_values,
|
1178 |
+
hidden_states=outputs.hidden_states,
|
1179 |
+
attentions=outputs.attentions,
|
1180 |
+
)
|
1181 |
+
output['logits'] = output['logits'].to(device)
|
1182 |
+
return output
|
1183 |
+
|
1184 |
+
def prepare_inputs_for_generation(
|
1185 |
+
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
1186 |
+
):
|
1187 |
+
if past_key_values is not None:
|
1188 |
+
past_length = past_key_values[0][0].shape[2]
|
1189 |
+
|
1190 |
+
# Some generation methods already pass only the last input ID
|
1191 |
+
if input_ids.shape[1] > past_length:
|
1192 |
+
remove_prefix_length = past_length
|
1193 |
+
else:
|
1194 |
+
# Default to old behavior: keep only final ID
|
1195 |
+
remove_prefix_length = input_ids.shape[1] - 1
|
1196 |
+
|
1197 |
+
input_ids = input_ids[:, remove_prefix_length:]
|
1198 |
+
|
1199 |
+
position_ids = kwargs.get('position_ids', None)
|
1200 |
+
if attention_mask is not None and position_ids is None:
|
1201 |
+
# create position_ids on the fly for batch generation
|
1202 |
+
position_ids = attention_mask.long().cumsum(-1) - 1
|
1203 |
+
position_ids.masked_fill_(attention_mask == 0, 1)
|
1204 |
+
if past_key_values:
|
1205 |
+
position_ids = position_ids[:, -input_ids.shape[1] :]
|
1206 |
+
|
1207 |
+
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
|
1208 |
+
if inputs_embeds is not None and past_key_values is None:
|
1209 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
1210 |
+
else:
|
1211 |
+
model_inputs = {'input_ids': input_ids}
|
1212 |
+
|
1213 |
+
model_inputs.update(
|
1214 |
+
{
|
1215 |
+
'position_ids': position_ids,
|
1216 |
+
'past_key_values': past_key_values,
|
1217 |
+
'phantom_position': kwargs.get('phantom_position'),
|
1218 |
+
'use_cache': kwargs.get('use_cache'),
|
1219 |
+
'attention_mask': attention_mask,
|
1220 |
+
}
|
1221 |
+
)
|
1222 |
+
return model_inputs
|
1223 |
+
|
1224 |
+
@staticmethod
|
1225 |
+
def _reorder_cache(past_key_values, beam_idx):
|
1226 |
+
reordered_past = ()
|
1227 |
+
for layer_past in past_key_values:
|
1228 |
+
reordered_past += (
|
1229 |
+
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
1230 |
+
)
|
1231 |
+
return reordered_past
|
1232 |
+
|
1233 |
+
def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
|
1234 |
+
if tokenizer.add_bos_token:
|
1235 |
+
prompt = ''
|
1236 |
+
else:
|
1237 |
+
prompt = tokenizer.bos_token
|
1238 |
+
if meta_instruction:
|
1239 |
+
prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
|
1240 |
+
for record in history:
|
1241 |
+
prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
|
1242 |
+
prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
|
1243 |
+
return tokenizer([prompt], return_tensors='pt')
|
1244 |
+
|
1245 |
+
@torch.no_grad()
|
1246 |
+
def chat(
|
1247 |
+
self,
|
1248 |
+
tokenizer,
|
1249 |
+
query: str,
|
1250 |
+
history: List[Tuple[str, str]] = [],
|
1251 |
+
streamer: Optional[BaseStreamer] = None,
|
1252 |
+
max_new_tokens: int = 1024,
|
1253 |
+
do_sample: bool = True,
|
1254 |
+
temperature: float = 0.8,
|
1255 |
+
top_p: float = 0.8,
|
1256 |
+
meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
|
1257 |
+
'- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
|
1258 |
+
'- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
|
1259 |
+
**kwargs,
|
1260 |
+
):
|
1261 |
+
inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
|
1262 |
+
inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
|
1263 |
+
# also add end-of-assistant token in eos token id to avoid unnecessary generation
|
1264 |
+
eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
|
1265 |
+
outputs = self.generate(
|
1266 |
+
**inputs,
|
1267 |
+
streamer=streamer,
|
1268 |
+
max_new_tokens=max_new_tokens,
|
1269 |
+
do_sample=do_sample,
|
1270 |
+
temperature=temperature,
|
1271 |
+
top_p=top_p,
|
1272 |
+
eos_token_id=eos_token_id,
|
1273 |
+
**kwargs,
|
1274 |
+
)
|
1275 |
+
outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]) :]
|
1276 |
+
response = tokenizer.decode(outputs, skip_special_tokens=True)
|
1277 |
+
response = response.split('<|im_end|>')[0]
|
1278 |
+
history = history + [(query, response)]
|
1279 |
+
return response, history
|
1280 |
+
|
1281 |
+
@torch.no_grad()
|
1282 |
+
def stream_chat(
|
1283 |
+
self,
|
1284 |
+
tokenizer,
|
1285 |
+
query: str,
|
1286 |
+
history: List[Tuple[str, str]] = [],
|
1287 |
+
max_new_tokens: int = 1024,
|
1288 |
+
do_sample: bool = True,
|
1289 |
+
temperature: float = 0.8,
|
1290 |
+
top_p: float = 0.8,
|
1291 |
+
**kwargs,
|
1292 |
+
):
|
1293 |
+
"""
|
1294 |
+
Return a generator in format: (response, history)
|
1295 |
+
Eg.
|
1296 |
+
('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
|
1297 |
+
('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
|
1298 |
+
"""
|
1299 |
+
if BaseStreamer is None:
|
1300 |
+
raise ModuleNotFoundError(
|
1301 |
+
'The version of `transformers` is too low. Please make sure '
|
1302 |
+
'that you have installed `transformers>=4.28.0`.'
|
1303 |
+
)
|
1304 |
+
|
1305 |
+
response_queue = queue.Queue(maxsize=20)
|
1306 |
+
|
1307 |
+
class ChatStreamer(BaseStreamer):
|
1308 |
+
def __init__(self, tokenizer) -> None:
|
1309 |
+
super().__init__()
|
1310 |
+
self.tokenizer = tokenizer
|
1311 |
+
self.queue = response_queue
|
1312 |
+
self.query = query
|
1313 |
+
self.history = history
|
1314 |
+
self.response = ''
|
1315 |
+
self.cache = []
|
1316 |
+
self.received_inputs = False
|
1317 |
+
self.queue.put((self.response, history + [(self.query, self.response)]))
|
1318 |
+
|
1319 |
+
def put(self, value):
|
1320 |
+
if len(value.shape) > 1 and value.shape[0] > 1:
|
1321 |
+
raise ValueError('ChatStreamer only supports batch size 1')
|
1322 |
+
elif len(value.shape) > 1:
|
1323 |
+
value = value[0]
|
1324 |
+
|
1325 |
+
if not self.received_inputs:
|
1326 |
+
# The first received value is input_ids, ignore here
|
1327 |
+
self.received_inputs = True
|
1328 |
+
return
|
1329 |
+
|
1330 |
+
self.cache.extend(value.tolist())
|
1331 |
+
token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
|
1332 |
+
if token.strip() != '<|im_end|>':
|
1333 |
+
self.response = self.response + token
|
1334 |
+
history = self.history + [(self.query, self.response)]
|
1335 |
+
self.queue.put((self.response, history))
|
1336 |
+
self.cache = []
|
1337 |
+
else:
|
1338 |
+
self.end()
|
1339 |
+
|
1340 |
+
def end(self):
|
1341 |
+
self.queue.put(None)
|
1342 |
+
|
1343 |
+
def stream_producer():
|
1344 |
+
return self.chat(
|
1345 |
+
tokenizer=tokenizer,
|
1346 |
+
query=query,
|
1347 |
+
streamer=ChatStreamer(tokenizer=tokenizer),
|
1348 |
+
history=history,
|
1349 |
+
max_new_tokens=max_new_tokens,
|
1350 |
+
do_sample=do_sample,
|
1351 |
+
temperature=temperature,
|
1352 |
+
top_p=top_p,
|
1353 |
+
**kwargs,
|
1354 |
+
)
|
1355 |
+
|
1356 |
+
def consumer():
|
1357 |
+
producer = threading.Thread(target=stream_producer)
|
1358 |
+
producer.start()
|
1359 |
+
while True:
|
1360 |
+
res = response_queue.get()
|
1361 |
+
if res is None:
|
1362 |
+
return
|
1363 |
+
yield res
|
1364 |
+
|
1365 |
+
return consumer()
|
1366 |
+
|
1367 |
+
|
1368 |
+
# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
|
1369 |
+
@add_start_docstrings(
|
1370 |
+
"""
|
1371 |
+
The InternLM2 Model transformer with a sequence classification head on top (linear layer).
|
1372 |
+
|
1373 |
+
[`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
|
1374 |
+
as other causal models (e.g. GPT-2) do.
|
1375 |
+
|
1376 |
+
Since it does classification on the last token, it requires to know the position of the last token. If a
|
1377 |
+
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
1378 |
+
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
1379 |
+
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
1380 |
+
each row of the batch).
|
1381 |
+
""",
|
1382 |
+
InternLM2_START_DOCSTRING,
|
1383 |
+
)
|
1384 |
+
class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
|
1385 |
+
def __init__(self, config):
|
1386 |
+
super().__init__(config)
|
1387 |
+
self.num_labels = config.num_labels
|
1388 |
+
self.model = InternLM2Model(config)
|
1389 |
+
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
1390 |
+
|
1391 |
+
# Initialize weights and apply final processing
|
1392 |
+
self.post_init()
|
1393 |
+
|
1394 |
+
def get_input_embeddings(self):
|
1395 |
+
return self.model.tok_embeddings
|
1396 |
+
|
1397 |
+
def set_input_embeddings(self, value):
|
1398 |
+
self.model.tok_embeddings = value
|
1399 |
+
|
1400 |
+
@add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
|
1401 |
+
def forward(
|
1402 |
+
self,
|
1403 |
+
input_ids: torch.LongTensor = None,
|
1404 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1405 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1406 |
+
past_key_values: Optional[List[torch.FloatTensor]] = None,
|
1407 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1408 |
+
labels: Optional[torch.LongTensor] = None,
|
1409 |
+
use_cache: Optional[bool] = None,
|
1410 |
+
output_attentions: Optional[bool] = None,
|
1411 |
+
output_hidden_states: Optional[bool] = None,
|
1412 |
+
return_dict: Optional[bool] = None,
|
1413 |
+
) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
1414 |
+
r"""
|
1415 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1416 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1417 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1418 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1419 |
+
"""
|
1420 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1421 |
+
|
1422 |
+
transformer_outputs = self.model(
|
1423 |
+
input_ids,
|
1424 |
+
attention_mask=attention_mask,
|
1425 |
+
position_ids=position_ids,
|
1426 |
+
past_key_values=past_key_values,
|
1427 |
+
inputs_embeds=inputs_embeds,
|
1428 |
+
use_cache=use_cache,
|
1429 |
+
output_attentions=output_attentions,
|
1430 |
+
output_hidden_states=output_hidden_states,
|
1431 |
+
return_dict=return_dict,
|
1432 |
+
)
|
1433 |
+
hidden_states = transformer_outputs[0]
|
1434 |
+
logits = self.score(hidden_states)
|
1435 |
+
|
1436 |
+
if input_ids is not None:
|
1437 |
+
batch_size = input_ids.shape[0]
|
1438 |
+
else:
|
1439 |
+
batch_size = inputs_embeds.shape[0]
|
1440 |
+
|
1441 |
+
if self.config.pad_token_id is None and batch_size != 1:
|
1442 |
+
raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
|
1443 |
+
if self.config.pad_token_id is None:
|
1444 |
+
sequence_lengths = -1
|
1445 |
+
else:
|
1446 |
+
if input_ids is not None:
|
1447 |
+
sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
|
1448 |
+
logits.device
|
1449 |
+
)
|
1450 |
+
else:
|
1451 |
+
sequence_lengths = -1
|
1452 |
+
|
1453 |
+
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
1454 |
+
|
1455 |
+
loss = None
|
1456 |
+
if labels is not None:
|
1457 |
+
labels = labels.to(logits.device)
|
1458 |
+
if self.config.problem_type is None:
|
1459 |
+
if self.num_labels == 1:
|
1460 |
+
self.config.problem_type = 'regression'
|
1461 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1462 |
+
self.config.problem_type = 'single_label_classification'
|
1463 |
+
else:
|
1464 |
+
self.config.problem_type = 'multi_label_classification'
|
1465 |
+
|
1466 |
+
if self.config.problem_type == 'regression':
|
1467 |
+
loss_fct = MSELoss()
|
1468 |
+
if self.num_labels == 1:
|
1469 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
1470 |
+
else:
|
1471 |
+
loss = loss_fct(pooled_logits, labels)
|
1472 |
+
elif self.config.problem_type == 'single_label_classification':
|
1473 |
+
loss_fct = CrossEntropyLoss()
|
1474 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
1475 |
+
elif self.config.problem_type == 'multi_label_classification':
|
1476 |
+
loss_fct = BCEWithLogitsLoss()
|
1477 |
+
loss = loss_fct(pooled_logits, labels)
|
1478 |
+
if not return_dict:
|
1479 |
+
output = (pooled_logits,) + transformer_outputs[1:]
|
1480 |
+
return ((loss,) + output) if loss is not None else output
|
1481 |
+
|
1482 |
+
return SequenceClassifierOutputWithPast(
|
1483 |
+
loss=loss,
|
1484 |
+
logits=pooled_logits,
|
1485 |
+
past_key_values=transformer_outputs.past_key_values,
|
1486 |
+
hidden_states=transformer_outputs.hidden_states,
|
1487 |
+
attentions=transformer_outputs.attentions,
|
1488 |
+
)
|
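For reference, `build_inputs` above assembles the ChatML-style prompt that `chat()` feeds to `generate`. A minimal sketch of the resulting string for one prior turn; the query, history, and system text here are illustrative, not from this repo:

```python
# Sketch of the prompt build_inputs() produces, assuming
# tokenizer.add_bos_token is True (so no explicit BOS text is prepended).
history = [('Hello', 'Hi! How can I help?')]        # illustrative prior turn
meta_instruction = 'You are a helpful assistant.'   # illustrative system text

prompt = ''
prompt += f'<|im_start|>system\n{meta_instruction}<|im_end|>\n'
for user_msg, assistant_msg in history:
    prompt += f'<|im_start|>user\n{user_msg}<|im_end|>\n<|im_start|>assistant\n{assistant_msg}<|im_end|>\n'
prompt += '<|im_start|>user\nWhat is Phantom?<|im_end|>\n<|im_start|>assistant\n'
```

`chat()` then appends `<|im_end|>` to the eos ids so generation stops at the end-of-assistant marker instead of running on.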
model/arch_1_8b/modeling_phantom.py
ADDED
@@ -0,0 +1,176 @@
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
from torch import nn
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel

from .configuration_phantom import PhantomConfig
from .modeling_intern_vit import InternVisionModel
from .modeling_internlm2 import InternLM2ForCausalLM

from utils.utils import *

class PhantomForCausalLM(PreTrainedModel):
    config_class = PhantomConfig
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'InternLM2DecoderLayer']

    def __init__(self, config: PhantomConfig):
        super().__init__(config)
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio

        self.vision_model = InternVisionModel(config.vision_config)
        self.language_model = InternLM2ForCausalLM(config.llm_config)

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.vision_proj = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        # prompt rule
        self.prompt_rule = {
            "system_start": "<|im_start|>system\n",
            "system_end": "<|im_end|>",
            "user_start": "<|im_start|>user\n",
            "user_end": "<|im_end|>",
            "assistant_start": "<|im_start|>assistant\n",
            "assistant_end": "<|im_end|>",
            "test_start": "assistant\n",
            "test_end": "<|im_end|>",
            "split": "",
        }

    def eval_process(
        self,
        inputs,
        tokenizer,
        data,
        device,
    ):
        batched_image = []
        batched_qa_prompt = []
        batched_phantom_position = []
        for _input in inputs:

            # making image prompt
            if 'image' in _input.keys() and _input['image'] is not None:
                process_image = dynamic_preprocess(_input['image'].to(device))
                dynamic_process_image = torch.stack([dynamic_transform(image) for image in process_image]).to(device)
                img_token_number = dynamic_process_image.shape[0] * 256
                batched_image.append(dynamic_process_image)

            # make question and answer
            question = _input['question']

            # make instruction (qa pair) and label
            qa_prompt = make_instruction(question, data, self.prompt_rule)

            # adding image special tokens to question
            if 'image' in _input.keys():
                qa_prompt = qa_prompt.replace('<image>', '<img><IMG_CONTEXT></img>')

                # add bundle image tokens if it has <image> token
                qa_prompt = add_bundle_tokens(qa_prompt, '<IMG_CONTEXT>', img_token_number)

            # phantom_position
            label = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].to(device)
            phantom_position = torch.zeros_like(label)
            phantom_position[0] = 1

            # batched processing
            batched_qa_prompt.append(qa_prompt)
            batched_phantom_position.append(phantom_position.flip(dims=[0]))

        '''For Final Outputs'''
        qa_prompts = tokenizer(batched_qa_prompt, padding='longest', return_tensors="pt", add_special_tokens=False)

        # [1] input_ids
        input_ids = qa_prompts.input_ids.to(device)

        # [2] attention_mask
        attention_mask = qa_prompts.attention_mask.to(device)

        # [3] Phantom Position
        batched_phantom_position = torch.nn.utils.rnn.pad_sequence(batched_phantom_position, batch_first=True, padding_value=0).flip(dims=[1])  # padding left

        if len(batched_image):
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "pixel_values": torch.cat(batched_image, dim=0).to(device),
                    "phantom_position": batched_phantom_position.bool()
                    }
        else:
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "phantom_position": batched_phantom_position.bool()
                    }

    def extract_feature(self, pixel_values):
        vit_embeds = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=False,
            return_dict=True).last_hidden_state
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.vision_proj(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        phantom_position: torch.BoolTensor = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:

        if pixel_values is not None:
            vit_embeds = self.extract_feature(pixel_values.to(torch.bfloat16))
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.config.image_token_index)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            phantom_position=phantom_position,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=True,
            pad_token_id=self.config.eos_token_id,
            eos_token_id=self.config.eos_token_id,
            **generate_kwargs,
        )

        return outputs
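The hard-coded `dynamic_process_image.shape[0] * 256` in `eval_process` matches the `num_image_token` formula in `__init__`. A worked check under the usual InternViT-300M geometry; `image_size=448`, `patch_size=14`, and `downsample_ratio=0.5` are assumed config values that do not appear in this hunk:

```python
# Each 448x448 tile yields (448 // 14)**2 = 1024 patch embeddings; pixel_shuffle
# with scale 0.5 merges 2x2 neighborhoods, leaving 1024 * 0.5**2 = 256 tokens.
image_size, patch_size, downsample_ratio = 448, 14, 0.5  # assumed config values
num_image_token = int((image_size // patch_size) ** 2 * downsample_ratio ** 2)
assert num_image_token == 256  # matches img_token_number = n_tiles * 256 above
```

This is also why `vision_proj`'s input width is `vit_hidden_size * int(1 / downsample_ratio) ** 2`: pixel shuffle trades spatial tokens for a 4x wider channel dimension before projection into the LLM's hidden size.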
model/arch_1_8b/tokenization_internlm2.py
ADDED
@@ -0,0 +1,235 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization classes for InternLM."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}

PRETRAINED_VOCAB_FILES_MAP = {}


# Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
class InternLM2Tokenizer(PreTrainedTokenizer):
    """
    Construct a InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    _auto_class = 'AutoTokenizer'

    def __init__(
        self,
        vocab_file,
        unk_token='<unk>',
        bos_token='<s>',
        eos_token='</s>',
        pad_token='</s>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        decode_with_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        self._no_prefix_space_tokens = None
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    def no_prefix_space_tokens(self):
        if self._no_prefix_space_tokens is None:
            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
        return self._no_prefix_space_tokens

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    @property
    def bos_token_id(self) -> Optional[int]:
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _maybe_add_prefix_space(self, tokens, decoded):
        if tokens and tokens[0] not in self.no_prefix_space_tokens:
            return ' ' + decoded
        else:
            return decoded

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        out_string = self.clean_up_tokenization(out_string)
        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
        return out_string[1:]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []

        output = bos_token_ids + token_ids_0

        if token_ids_1 is not None:
            output = output + token_ids_1

        if self.add_eos_token:
            output = output + [self.eos_token_id]

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
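A hypothetical usage sketch for the tokenizer above; the vocab path is a placeholder, and a real SentencePiece model file is required at that location:

```python
from model.arch_1_8b.tokenization_internlm2 import InternLM2Tokenizer

tok = InternLM2Tokenizer(vocab_file='tokenizer.model')  # placeholder path
# With the defaults above (add_bos_token=True, add_eos_token=False),
# build_inputs_with_special_tokens prepends BOS and appends nothing:
ids = tok.build_inputs_with_special_tokens([100, 101])
# -> [tok.bos_token_id, 100, 101]
```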
model/arch_3_8b/configuration_intern_vit.py
ADDED
@@ -0,0 +1,114 @@
import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_300m'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
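`from_pretrained` above deliberately accepts a full multimodal checkpoint: when the loaded `config.json` is a composite config, only its `vision_config` sub-dict is kept before `from_dict` is called. A small sketch of that branch with illustrative values:

```python
# Illustrative composite config dict, as a full Phantom checkpoint would carry.
config_dict = {
    'model_type': 'phantom',
    'vision_config': {'model_type': 'intern_vit_300m', 'hidden_size': 1024},
}
if 'vision_config' in config_dict:
    config_dict = config_dict['vision_config']
# InternVisionConfig.from_dict(config_dict) then sees only the vision fields,
# and the model_type check passes without warning.
```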
model/arch_3_8b/configuration_phantom.py
ADDED
@@ -0,0 +1,82 @@
import copy

from transformers import LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig
from .configuration_phi3 import Phi3Config

logger = logging.get_logger(__name__)


class PhantomConfig(PretrainedConfig):
    model_type = 'phantom'
    is_composition = True

    def __init__(
        self,
        vision_config=None,
        llm_config=None,
        use_backbone_lora=0,
        use_llm_lora=0,
        force_image_size=None,
        downsample_ratio=0.5,
        template=None,
        dynamic_image_size=False,
        use_thumbnail=False,
        min_dynamic_patch=1,
        max_dynamic_patch=6,
        **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'Phi3ForCausalLM':
            self.llm_config = Phi3Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
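Note that the constructor dispatches on `llm_config['architectures'][0]`, so a missing or unexpected `architectures` entry fails loudly rather than silently picking a default. A sketch of that dispatch with an illustrative dict:

```python
# Illustrative llm_config: the first entry of 'architectures' selects the
# wrapper class (LlamaConfig or Phi3Config); anything else raises ValueError.
llm_config = {'architectures': ['Phi3ForCausalLM'], 'hidden_size': 3072}
arch = llm_config['architectures'][0]
assert arch in ('LlamaForCausalLM', 'Phi3ForCausalLM')
```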
model/arch_3_8b/configuration_phi3.py
ADDED
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License atd
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
""" Phi-3 model configuration"""
|
16 |
+
|
17 |
+
|
18 |
+
from transformers.configuration_utils import PretrainedConfig
|
19 |
+
from transformers.utils import logging
|
20 |
+
|
21 |
+
logger = logging.get_logger(__name__)
|
22 |
+
|
23 |
+
PHI3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
|
24 |
+
'microsoft/Phi-3-mini-4k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/config.json',
|
25 |
+
'microsoft/Phi-3-mini-128k-instruct': 'https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/config.json',
|
26 |
+
}
|
27 |
+
|
28 |
+
|
29 |
+
class Phi3Config(PretrainedConfig):
|
30 |
+
r"""
|
31 |
+
This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
|
32 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
33 |
+
defaults will yield a similar configuration to that of the
|
34 |
+
[microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
|
35 |
+
|
36 |
+
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
37 |
+
documentation from [`PretrainedConfig`] for more information.
|
38 |
+
|
39 |
+
Args:
|
40 |
+
vocab_size (`int`, *optional*, defaults to 32064):
|
41 |
+
Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
|
42 |
+
`inputs_ids` passed when calling [`Phi3Model`].
|
43 |
+
hidden_size (`int`, *optional*, defaults to 3072):
|
44 |
+
Dimension of the hidden representations.
|
45 |
+
intermediate_size (`int`, *optional*, defaults to 8192):
|
46 |
+
Dimension of the MLP representations.
|
47 |
+
num_hidden_layers (`int`, *optional*, defaults to 32):
|
48 |
+
Number of hidden layers in the Transformer decoder.
|
49 |
+
num_attention_heads (`int`, *optional*, defaults to 32):
|
50 |
+
Number of attention heads for each attention layer in the Transformer decoder.
|
51 |
+
num_key_value_heads (`int`, *optional*):
|
52 |
+
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
53 |
+
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
54 |
+
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
55 |
+
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
56 |
+
by meanpooling all the original heads within that group. For more details checkout [this
|
57 |
+
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
|
58 |
+
`num_attention_heads`.
|
59 |
+
resid_pdrop (`float`, *optional*, defaults to 0.0):
|
60 |
+
Dropout probability for mlp outputs.
|
61 |
+
embd_pdrop (`int`, *optional*, defaults to 0.0):
|
62 |
+
The dropout ratio for the embeddings.
|
63 |
+
attention_dropout (`float`, *optional*, defaults to 0.0):
|
64 |
+
The dropout ratio after computing the attention scores.
|
65 |
+
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
66 |
+
The non-linear activation function (function or string) in the decoder.
|
67 |
+
max_position_embeddings (`int`, *optional*, defaults to 4096):
|
68 |
+
The maximum sequence length that this model might ever be used with.
|
69 |
+
original_max_position_embeddings (`int`, *optional*, defaults to 4096):
|
70 |
+
The maximum sequence length that this model was trained with. This is used to determine the size of the
|
71 |
+
original RoPE embeddings when using long scaling.
|
72 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
73 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
74 |
+
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
|
75 |
+
The epsilon value used for the RMSNorm.
|
76 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
77 |
+
Whether or not the model should return the last key/values attentions (not used by all models). Only
|
78 |
+
relevant if `config.is_decoder=True`. Whether to tie weight embeddings or not.
|
79 |
+
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
80 |
+
Whether to tie weight embeddings
|
81 |
+
rope_theta (`float`, *optional*, defaults to 10000.0):
|
82 |
+
The base period of the RoPE embeddings.
|
83 |
+
rope_scaling (`dict`, *optional*):
|
84 |
+
The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
|
85 |
+
contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be either `su` or `yarn` and
|
86 |
+
the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
|
87 |
+
divided by the number of attention heads divided by 2.
|
88 |
+
bos_token_id (`int`, *optional*, defaults to 1):
|
89 |
+
The id of the "beginning-of-sequence" token.
|
90 |
+
eos_token_id (`int`, *optional*, defaults to 32000):
|
91 |
+
The id of the "end-of-sequence" token.
|
92 |
+
pad_token_id (`int`, *optional*, defaults to 32000):
|
93 |
+
The id of the padding token.
|
94 |
+
sliding_window (`int`, *optional*):
|
95 |
+
Sliding window attention window size. If `None`, no sliding window is applied.
|
96 |
+
|
97 |
+
Example:
|
98 |
+
|
99 |
+
```python
|
100 |
+
>>> from transformers import Phi3Model, Phi3Config
|
101 |
+
|
102 |
+
>>> # Initializing a Phi-3 style configuration
|
103 |
+
>>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
|
104 |
+
|
105 |
+
>>> # Initializing a model from the configuration
|
106 |
+
>>> model = Phi3Model(configuration)
|
107 |
+
|
108 |
+
>>> # Accessing the model configuration
|
109 |
+
>>> configuration = model.config
|
110 |
+
```"""
|
111 |
+
|
    model_type = 'phi3'
    keys_to_ignore_at_inference = ['past_key_values']

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act='silu',
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                '`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_short_factor = self.rope_scaling.get('short_factor', None)
        rope_scaling_long_factor = self.rope_scaling.get('long_factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['su', 'yarn']:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['su', 'yarn'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )
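A note on what `_rope_scaling_validation` actually pins down: each factor list needs `hidden_size // num_attention_heads // 2` entries, one per rotary frequency, which is 3072 // 32 // 2 = 48 for the default geometry above. A minimal sketch of a config that passes validation; the import path is assumed from this repo's layout, and the all-ones factors are illustrative placeholders, not trained values:

from model.arch_3_8b.configuration_phi3 import Phi3Config

factors = [1.0] * 48  # 48 = 3072 // 32 // 2 rotary frequencies

config = Phi3Config(
    max_position_embeddings=131072,           # extended context window
    original_max_position_embeddings=4096,    # context the model was trained at
    rope_scaling={'type': 'su', 'short_factor': factors, 'long_factor': factors},
)
# __init__ calls _rope_scaling_validation(), which raises unless the dict has
# exactly the keys type/short_factor/long_factor, the type is 'su' or 'yarn',
# and both factor lists have length 48.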
model/arch_3_8b/modeling_intern_vit.py
ADDED
@@ -0,0 +1,430 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    try:  # v1
        from flash_attn.flash_attn_interface import \
            flash_attn_unpadded_qkvpacked_func
    except:  # v2
        from flash_attn.flash_attn_interface import \
            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

    from flash_attn.bert_padding import pad_input, unpad_input

    has_flash_attn = True
except:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None
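The `cu_seqlens` tensor built above is how flash attention's packed interface tracks batch boundaries: sequence i occupies rows cu_seqlens[i]:cu_seqlens[i+1] of the flattened (nnz, 3, h, d) qkv tensor. A minimal sketch of the equal-length branch, with illustrative shapes:

import torch

batch_size, seqlen = 4, 1025  # e.g. four images of 1025 ViT tokens each
cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32)
print(cu_seqlens.tolist())  # [0, 1025, 2050, 3075, 4100]
# After rearrange(qkv, 'b s ... -> (b s) ...'), sequence i is rows
# cu_seqlens[i]:cu_seqlens[i+1] of the packed tensor.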
class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}
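InternRMSNorm normalizes by the root mean square, x / sqrt(mean(x^2) + eps), scaled by a learned per-channel weight, with the statistics computed in float32 for stability. A quick numeric check, assuming the pure-PyTorch class above is in effect (i.e. apex's FusedRMSNorm was not swapped in):

import torch

x = torch.randn(2, 5, 8)           # (batch, seq, hidden)
norm = InternRMSNorm(8, eps=1e-6)  # weight starts at ones
expected = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
assert torch.allclose(norm(x), expected, atol=1e-5)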
class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings
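The strided Conv2d turns each patch_size x patch_size tile into one token, so sequence length is fixed by resolution. With the 448-pixel, patch-14 geometry this repo uses elsewhere (assumed here for illustration), the counts work out as:

image_size, patch_size = 448, 14               # assumed InternViT config values
num_patches = (image_size // patch_size) ** 2  # 32 * 32 = 1024 patch tokens
num_positions = num_patches + 1                # plus the CLS token = 1025
print(num_patches, num_positions)              # 1024 1025
# _get_pos_embed bicubically resizes the learned 32x32 position grid whenever
# the runtime H x W patch grid differs from the one set at construction.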
class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states
class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )
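The stochastic depth decay rule gives each layer a drop-path probability rising linearly from 0 at the first layer to `drop_path_rate` at the last, so regularization concentrates in the deeper layers. A small sketch with illustrative values:

import torch

drop_path_rate, num_hidden_layers = 0.1, 5  # illustrative, not the shipped config
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_hidden_layers)]
print([round(r, 3) for r in dpr])  # [0.0, 0.025, 0.05, 0.075, 0.1]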
class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
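`resize_pos_embeddings` is the offline counterpart of `_get_pos_embed`: it permanently re-grids the learned position table for a new training resolution instead of interpolating on each forward pass. The bookkeeping, with illustrative sizes:

old_size, new_size, patch_size = 448, 896, 14      # illustrative values
old_positions = (old_size // patch_size) ** 2 + 1  # 1 CLS + 32*32 grid = 1025
new_positions = (new_size // patch_size) ** 2 + 1  # 1 CLS + 64*64 grid = 4097
print(old_positions, new_positions)                # 1025 4097
# resize_pos_embeddings keeps the CLS row and bicubically interpolates the
# 32x32 grid up to 64x64 before re-registering the parameter.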
model/arch_3_8b/modeling_phantom.py
ADDED
@@ -0,0 +1,177 @@
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
from torch import nn
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel

from .configuration_phantom import PhantomConfig
from .modeling_intern_vit import InternVisionModel
from .modeling_phi3 import Phi3ForCausalLM

from utils.utils import *

class PhantomForCausalLM(PreTrainedModel):
    config_class = PhantomConfig
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'Phi3DecoderLayer']

    def __init__(self, config: PhantomConfig):
        super().__init__(config)
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio

        self.vision_model = InternVisionModel(config.vision_config)
        self.language_model = Phi3ForCausalLM(config.llm_config)

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.vision_proj = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        # prompt rule
        self.prompt_rule = {
            "system_start": "<|system|>\n",
            "system_end": "<|end|>",
            "user_start": "<|user|>\n",
            "user_end": "<|end|>",
            "assistant_start": "<|assistant|>\n",
            "assistant_end": "<|end|>",
            "test_start": "<|assistant|>\n",
            "test_end": "<|end|>",
            "split": "",
        }

    def eval_process(
        self,
        inputs,
        tokenizer,
        data,
        device,
    ):
        batched_image = []
        batched_qa_prompt = []
        batched_phantom_position = []
        for _input in inputs:

            # making image prompt
            if 'image' in _input.keys() and _input['image'] is not None:
                process_image = dynamic_preprocess(_input['image'].to(device))
                dynamic_process_image = torch.stack([dynamic_transform(image) for image in process_image]).to(device)
                img_token_number = dynamic_process_image.shape[0] * 256
                batched_image.append(dynamic_process_image)

            # make question and answer
            question = _input['question']

            # make instruction (qa pair) and label
            qa_prompt = make_instruction(question, data, self.prompt_rule)

            # adding image special tokens to question
            if 'image' in _input.keys():
                qa_prompt = qa_prompt.replace('<image>', '<img><IMG_CONTEXT></img>')

                # add bundle image tokens if it has <image> token
                qa_prompt = add_bundle_tokens(qa_prompt, '<IMG_CONTEXT>', img_token_number)

            # phantom_position
            label = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].to(device)
            phantom_position = torch.zeros_like(label)
            phantom_position[0] = 1

            # batched processing
            batched_qa_prompt.append(qa_prompt)
            batched_phantom_position.append(phantom_position.flip(dims=[0]))

        '''For Final Outputs'''
        qa_prompts = tokenizer(batched_qa_prompt, padding='longest', return_tensors="pt", add_special_tokens=False)

        # [1] input_ids
        input_ids = qa_prompts.input_ids.to(device)

        # [2] attention_mask
        attention_mask = qa_prompts.attention_mask.to(device)

        # [3] Phantom Position
        batched_phantom_position = torch.nn.utils.rnn.pad_sequence(batched_phantom_position, batch_first=True, padding_value=0).flip(dims=[1])  # padding left

        if len(batched_image):
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "pixel_values": torch.cat(batched_image, dim=0).to(device),
                    "phantom_position": batched_phantom_position.bool()
                    }
        else:
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "phantom_position": batched_phantom_position.bool()
                    }
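`pad_sequence` only right-pads, but decoder-style generation wants left padding; `eval_process` therefore flips each `phantom_position` vector before padding and flips the padded batch back. A toy sketch of the double-flip trick:

import torch
from torch.nn.utils.rnn import pad_sequence

a = torch.tensor([1, 0, 0])        # marker on the first token, length 3
b = torch.tensor([1, 0, 0, 0, 0])  # marker on the first token, length 5
padded = pad_sequence([a.flip(0), b.flip(0)], batch_first=True, padding_value=0).flip(dims=[1])
print(padded)
# tensor([[0, 0, 1, 0, 0],
#         [1, 0, 0, 0, 0]])
# Row 0 gained two zeros on the left, so markers stay aligned with the
# right-justified token ids produced by a left-padding tokenizer.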
    def extract_feature(self, pixel_values):
        vit_embeds = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=False,
            return_dict=True).last_hidden_state
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.vision_proj(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        phantom_position: torch.BoolTensor = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:

        if pixel_values is not None:
            vit_embeds = self.extract_feature(pixel_values.to(torch.bfloat16))
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.config.image_token_index)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            phantom_position=phantom_position,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=True,
            pad_token_id=self.config.eos_token_id,
            eos_token_id=self.config.eos_token_id,
            **generate_kwargs,
        )

        return outputs
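The 256 `<IMG_CONTEXT>` tokens bundled per tile in `eval_process` fall out of this pipeline's arithmetic. With a 448-px tile, 14-px patches and downsample_ratio = 0.5 (the values this architecture pairs with 256 tokens per tile; assumed here), pixel shuffle trades spatial positions for channels:

image_size, patch_size, downsample_ratio = 448, 14, 0.5  # assumed config values

vit_tokens = (image_size // patch_size) ** 2               # 1024 patch tokens, CLS dropped
num_image_token = int(vit_tokens * downsample_ratio ** 2)  # 1024 * 0.25 = 256
print(num_image_token)                                     # 256
# pixel_shuffle maps (B, 32, 32, C) -> (B, 16, 16, 4C), which is why
# vision_proj's first layers expect vit_hidden_size * int(1 / 0.5) ** 2
# = 4 * vit_hidden_size input features.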
model/arch_3_8b/modeling_phi3.py
ADDED
@@ -0,0 +1,1683 @@
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" PyTorch Phi-3 model."""

import inspect
import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import \
    _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                           CausalLMOutputWithPast,
                                           SequenceClassifierOutputWithPast,
                                           TokenClassifierOutput)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (add_code_sample_docstrings,
                                add_start_docstrings,
                                add_start_docstrings_to_model_forward,
                                is_flash_attn_2_available,
                                is_flash_attn_greater_or_equal_2_10, logging,
                                replace_return_docstrings)

from .configuration_phi3 import Phi3Config

logger = logging.get_logger(__name__)

# Transformers scans dependencies in the modeling file, causing issues on conditional loading. The regex only ignores try/catch blocks, but not if statements
# if is_flash_attn_2_available():
_flash_supports_window_size = False
try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import (index_first_axis, pad_input,  # noqa
                                         unpad_input)

    _flash_supports_window_size = 'window_size' in list(inspect.signature(flash_attn_func).parameters)
    has_flash_attn = True
except ImportError as error:
    logger.warning(
        f'`flash-attention` package not found, consider installing for better performance: {error}.'
    )
    if not _flash_supports_window_size:
        logger.warning(
            "Current `flash-attention` does not support `window_size`. Either upgrade or use `attn_implementation='eager'`."
        )
    has_flash_attn = False

_CHECKPOINT_FOR_DOC = 'microsoft/Phi-3-mini-4k-instruct'
_CONFIG_FOR_DOC = 'Phi3Config'

PHI3_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/Phi-3-mini-4k-instruct',
    'microsoft/Phi-3-mini-128k-instruct',
    # See all Phi-3 models at https://huggingface.co/models?filter=Phi-3
]

# Phantom
from utils.utils import *
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Phi3
class Phi3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Phi3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


# Copied from transformers.models.llama.modeling_llama._get_unpad_data
def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
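`_get_unpad_data` converts a padded attention mask into the flat indexing that varlen flash attention consumes: the positions of real tokens, cumulative sequence boundaries, and the longest sequence length. A toy run of the same three computations:

import torch
import torch.nn.functional as F

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])
seqlens = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
print(indices.tolist())      # [0, 1, 2, 4, 5]  real-token slots in the flat batch
print(cu_seqlens.tolist())   # [0, 3, 5]        per-sequence boundaries
print(seqlens.max().item())  # 3                max_seqlen_in_batch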
# Copied from transformers.models.gemma.modeling_gemma.GemmaRotaryEmbedding with gemma->phi3, Gemma->Phi3
class Phi3RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        self.register_buffer('inv_freq', None, persistent=False)

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if self.inv_freq is None:
            self.inv_freq = 1.0 / (
                self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim)
            )
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Phi3SuScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling['short_factor']
        self.long_factor = config.rope_scaling['long_factor']
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

            scale = self.max_position_embeddings / self.original_max_position_embeddings
            if scale <= 1.0:
                scaling_factor = 1.0
            else:
                scaling_factor = math.sqrt(1 + math.log(scale) / math.log(self.original_max_position_embeddings))

            cos = emb.cos() * scaling_factor
            sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


class Phi3YarnScaledRotaryEmbedding(Phi3RotaryEmbedding):
    def __init__(self, dim, config, device=None):
        super().__init__(dim, config.max_position_embeddings, config.rope_theta, device)

        self.short_factor = config.rope_scaling['short_factor']
        self.long_factor = config.rope_scaling['long_factor']
        self.original_max_position_embeddings = config.original_max_position_embeddings

    @torch.no_grad()
    def forward(self, x, position_ids, seq_len=None):
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.original_max_position_embeddings:
            ext_factors = torch.tensor(self.long_factor, dtype=torch.float32, device=x.device)
        else:
            ext_factors = torch.tensor(self.short_factor, dtype=torch.float32, device=x.device)

        inv_freq_shape = torch.arange(0, self.dim, 2, dtype=torch.int64, device=x.device).float() / self.dim
        self.inv_freq = 1.0 / (ext_factors * self.base**inv_freq_shape)

        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()

        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != 'mps' else 'cpu'
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)

            scale = self.max_position_embeddings / self.original_max_position_embeddings
            if scale <= 1.0:
                scaling_factor = 1.0
            else:
                scaling_factor = 0.1 * math.log(scale) + 1.0

            cos = emb.cos() * scaling_factor
            sin = emb.sin() * scaling_factor
        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
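Both long-RoPE variants rescale frequencies through `ext_factors` and then apply one extra closed-form magnitude correction; the two classes differ only in that `scaling_factor`. A quick sketch of the correction for stretching a 4k-trained model to 128k (scale = 32):

import math

original_max, extended_max = 4096, 131072
scale = extended_max / original_max  # 32.0
su_factor = math.sqrt(1 + math.log(scale) / math.log(original_max))
yarn_factor = 0.1 * math.log(scale) + 1.0
print(round(su_factor, 4), round(yarn_factor, 4))  # 1.1902 1.3466
# Each multiplies cos/sin uniformly, mildly compensating attention entropy
# at extended context lengths.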
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)

        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)


# Copied from transformers.models.llama.modeling_llama.repeat_kv with llama->phi
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
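For grouped-query attention, `repeat_kv` broadcasts each key/value head across its group of query heads before the score matmul. A toy shape check (Phi-3-mini itself ships with num_key_value_heads == num_heads, so n_rep would be 1 there; the grouping below is illustrative):

import torch

kv = torch.randn(1, 8, 16, 96)     # (batch, num_key_value_heads, seq, head_dim)
expanded = repeat_kv(kv, n_rep=4)  # 8 KV heads serving 32 query heads
print(expanded.shape)              # torch.Size([1, 32, 16, 96])
assert torch.equal(expanded[:, 0], expanded[:, 3])  # heads 0-3 reuse KV head 0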
class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f'Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will '
                'lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` '
                'when creating this class.'
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.original_max_position_embeddings = config.original_max_position_embeddings
        self.rope_theta = config.rope_theta
        self.rope_scaling = config.rope_scaling
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        op_size = self.num_heads * self.head_dim + 2 * (self.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(self.hidden_size, op_size, bias=False)
        self._init_rope()

        """
        Phantom
        """
        # Phantom Init
        self.turn_on_phantom = True
        self.xattn_query_phantom = XAttention(self.head_dim)
        self.xattn_key_phantom = XAttention(self.head_dim)
        self.xattn_value_phantom = XAttention(self.head_dim)
        self.gating_phantom_1 = nn.Linear(self.head_dim, 1)
        self.gating_phantom_2 = nn.Linear(self.head_dim, 1)

    def _init_rope(self):
        if self.rope_scaling is None:
            self.rotary_emb = Phi3RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            if scaling_type == 'su':
                self.rotary_emb = Phi3SuScaledRotaryEmbedding(self.head_dim, self.config)
            elif scaling_type == 'yarn':
                self.rotary_emb = Phi3YarnScaledRotaryEmbedding(self.head_dim, self.config)
            else:
                raise ValueError(f'Unknown RoPE scaling type {scaling_type}')

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        logger.warning_once('You are not running the flash-attention implementation, expect numerical differences.')

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
                    'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
                    'with a layer index.'
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(value_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
437 |
+
class Phi3FlashAttention2(Phi3Attention):
    """
    Phi-3 flash attention module. This module inherits from `Phi3Attention` as the weights of the module stay
    untouched. The only required change is on the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which became the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # Phi3FlashAttention2 attention does not support output_attentions

        if not _flash_supports_window_size:
            logger.warning_once(
                "The current flash attention version does not support sliding window attention. Please use `attn_implementation='eager'` or upgrade the flash-attn library."
            )
            raise ValueError('The current flash attention version does not support sliding window attention.')

        output_attentions = False

        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x head_dim x hidden_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f'The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} '
                    'for auto-regressive decoding with k/v caching, please make sure to initialize the attention class '
                    'with a layer index.'
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)

        # Because the input can be padded, the absolute sequence length depends on the max position id.
        rotary_seq_len = max(kv_seq_len, position_ids[:, -1].max().item()) + 1
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=rotary_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        use_sliding_windows = (
            _flash_supports_window_size
            and getattr(self.config, 'sliding_window', None) is not None
            and kv_seq_len > self.config.sliding_window
        )

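        # When the cached sequence outgrows the sliding window, only the last
        # (sliding_window - 1) cached key/value positions (and the matching tail of the
        # attention mask) are kept before the new states are appended below.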
        if past_key_value is not None:
            # Activate cache slicing only if the config has a `sliding_window` attribute
            cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
            if (
                getattr(self.config, 'sliding_window', None) is not None
                and kv_seq_len > self.config.sliding_window
                and cache_has_contents
            ):
                slicing_tokens = 1 - self.config.sliding_window

                past_key = past_key_value[self.layer_idx][0]
                past_value = past_key_value[self.layer_idx][1]

                past_key = past_key[:, :, slicing_tokens:, :].contiguous()
                past_value = past_value[:, :, slicing_tokens:, :].contiguous()

                if past_key.shape[-2] != self.config.sliding_window - 1:
                    raise ValueError(
                        f'past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got'
                        f' {past_key.shape}'
                    )

                if attention_mask is not None:
                    attention_mask = attention_mask[:, slicing_tokens:]
                    attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)

            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_dropout = self.attention_dropout if self.training else 0.0

        # In PEFT, we usually cast the layer norms to float32 for training stability,
        # so the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended not to cast the
        # LayerNorms to fp32.

        if query_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, '_pre_quantization_dtype'):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.qkv_proj.weight.dtype

            logger.warning_once(
                f'The input hidden states seem to have been silently cast to float32; this might be because'
                f' you have upcast embedding or layer norm layers to float32. We will cast the input back to'
                f' {target_dtype}.'
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        # Reshape to the expected shape for Flash Attention
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            phantom_position,
            dropout=attn_dropout,
            use_sliding_windows=use_sliding_windows,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self,
        query_states,
        key_states,
        value_states,
        attention_mask,
        query_length,
        phantom_position,
        dropout=0.0,
        softmax_scale=None,
        use_sliding_windows=False,
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            phantom_position (`torch.BoolTensor`):
                Boolean mask marking the phantom token positions in each sequence.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
            use_sliding_windows (`bool`, *optional*):
                Whether to activate sliding window attention.
        """

        """
        Phantom
        """
        if self.turn_on_phantom:

            # [Important] softmax_scale
            softmax_scale = 1 / math.sqrt(query_states.shape[-1])

            query_states_phantom = []
            key_states_phantom = []
            value_states_phantom = []
            for index, pos in enumerate(phantom_position):
                if query_states.shape[1] > 1:
                    query_states_phantom.append(query_states[index][pos])
                    key_states_phantom.append(key_states[index][pos])
                    value_states_phantom.append(value_states[index][pos])

            # saving phantom qkv for inference
            self.query_states_phantom = query_states_phantom
            self.key_states_phantom = key_states_phantom
            self.value_states_phantom = value_states_phantom

            # phantom qkv: list to tensor
            query_states_phantom = torch.stack(self.query_states_phantom)
            key_states_phantom = torch.stack(self.key_states_phantom)
            value_states_phantom = torch.stack(self.value_states_phantom)

            # phantom qkv: 1 -> N (sequence)
            query_states_phantom = self.xattn_query_phantom(q=query_states, k=query_states_phantom, v=query_states_phantom)
            key_states_phantom = self.xattn_key_phantom(q=key_states, k=key_states_phantom, v=key_states_phantom)
            value_states_phantom = self.xattn_value_phantom(q=value_states, k=value_states_phantom, v=value_states_phantom, is_residual=True)

            # concat original qkv and phantom qkv for hidden-dimension / heads
            query_states = torch.cat([query_states, query_states_phantom], dim=3)
            key_states = torch.cat([key_states, key_states_phantom], dim=3)
            value_states = torch.cat([value_states, value_states_phantom], dim=3)

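        # After the concatenation above, head_dim is doubled: the first half carries the
        # original q/k/v and the second half the phantom-conditioned q/k/v. Note that
        # softmax_scale was fixed to 1/sqrt(original head_dim) before the concat, and the
        # gating after the flash-attention call folds the two halves back together.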
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            if not use_sliding_windows:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output_unpad = flash_attn_varlen_func(
                    query_states,
                    key_states,
                    value_states,
                    cu_seqlens_q=cu_seqlens_q,
                    cu_seqlens_k=cu_seqlens_k,
                    max_seqlen_q=max_seqlen_in_batch_q,
                    max_seqlen_k=max_seqlen_in_batch_k,
                    dropout_p=dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            if not use_sliding_windows:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                )
            else:
                attn_output = flash_attn_func(
                    query_states,
                    key_states,
                    value_states,
                    dropout,
                    softmax_scale=softmax_scale,
                    causal=causal,
                    window_size=(self.config.sliding_window, self.config.sliding_window),
                )

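        # Split the doubled head dimension back into the original half and the phantom
        # half, then fuse them with a learned two-way softmax gate: weight_norm is the
        # normalized weight assigned to the original half.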
"""
|
742 |
+
Phantom
|
743 |
+
"""
|
744 |
+
if self.turn_on_phantom:
|
745 |
+
half_dim = attn_output.shape[-1] // 2
|
746 |
+
half1_o = attn_output[...,:half_dim]
|
747 |
+
half2_o = attn_output[...,half_dim:]
|
748 |
+
weight1 = self.gating_phantom_1(half1_o)
|
749 |
+
weight2 = self.gating_phantom_2(half2_o)
|
750 |
+
weight_norm = weight1.exp() / (weight1.exp() + weight2.exp())
|
751 |
+
attn_output = weight_norm * half1_o + (1-weight_norm) * half2_o
|
752 |
+
return attn_output
|
753 |
+
|
754 |
+
    # Copied from transformers.models.mistral.modeling_mistral.MistralFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        batch_size, kv_seq_len, num_heads, head_dim = key_layer.shape

        # On the first iteration we need to properly re-create the padding mask
        # by slicing it at the proper place
        if kv_seq_len != attention_mask.shape[-1]:
            attention_mask_num_tokens = attention_mask.shape[-1]
            attention_mask = attention_mask[:, attention_mask_num_tokens - kv_seq_len :]

        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)

        key_layer = index_first_axis(key_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)
        value_layer = index_first_axis(value_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k)

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention with Llama->Phi3
# TODO @Arthur no longer copied from LLama after static cache
class Phi3SdpaAttention(Phi3Attention):
    """
    Phi3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `Phi3Attention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
    the SDPA API.
    """

    # Adapted from Phi3Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                'Phi3Model is using Phi3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, '
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
            )

        bsz, q_len, _ = hidden_states.size()

        qkv = self.qkv_proj(hidden_states)
        query_pos = self.num_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, position_ids, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            cache_kwargs = {'sin': sin, 'cos': cos}  # Specific to RoPE models
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )

        # SDPA with the memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs and a custom attn_mask.
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == 'cuda' and attention_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
            is_causal=self.is_causal and attention_mask is None and q_len > 1,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


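# Attention implementation dispatch: Phi3DecoderLayer instantiates one of these based on
# config._attn_implementation. Only the flash implementation accepts `phantom_position`,
# so it is the only path that applies the Phantom mechanism.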
PHI3_ATTENTION_CLASSES = {
    'eager': Phi3Attention,
    'flash_attention_2': Phi3FlashAttention2,
    'sdpa': Phi3SdpaAttention,
}


class Phi3DecoderLayer(nn.Module):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__()

        self.config = config
        self.self_attn = PHI3_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx=layer_idx)

        self.mlp = Phi3MLP(config)
        self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
        self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead.'
            )
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """

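        # Pre-norm transformer block: RMSNorm -> self-attention -> residual dropout,
        # then RMSNorm -> MLP -> residual dropout.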
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        attn_outputs, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            phantom_position=phantom_position,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )

        hidden_states = residual + self.resid_attn_dropout(attn_outputs)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


PHI3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`Phi3Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
    PHI3_START_DOCSTRING,
)
class Phi3PreTrainedModel(PreTrainedModel):
    config_class = Phi3Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['Phi3DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True
    _supports_sdpa = False
    _supports_cache_class = True

    _version = '0.0.5'

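    # `has_flash_attn` is assumed to be a module-level flag set when flash-attn is
    # imported earlier in this file; when it is False the model falls back to eager
    # attention below.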
    def __init__(self, config: Phi3Config):
        if not has_flash_attn:
            config._attn_implementation = 'eager'
            print('Warning: Flash attention is not available, using eager attention instead.')
        super().__init__(config)

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


PHI3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify it to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    'The bare Phi-3 model outputting raw hidden-states without any specific head on top.',
    PHI3_START_DOCSTRING,
)
class Phi3Model(Phi3PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`].

    Args:
        config: Phi3Config
    """

    def __init__(self, config: Phi3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.embed_dropout = nn.Dropout(config.embd_pdrop)
        self.layers = nn.ModuleList(
            [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self._attn_implementation = config._attn_implementation

        self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        past_key_values_length = 0

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

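        # Legacy tuple-format caches are converted to a DynamicCache here and converted
        # back to the legacy format before returning (see `next_cache` below).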
        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == 'flash_attention_2' and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right';"
                    ' this may lead to unexpected behaviour for the Flash Attention version of Phi3. Make sure to'
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                )

        if self._attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask,
                (batch_size, seq_length),
                inputs_embeds,
                past_key_values_length,
                sliding_window=self.config.sliding_window,
            )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    phantom_position,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    phantom_position=phantom_position,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class Phi3ForCausalLM(Phi3PreTrainedModel):
    _tied_weights_keys = ['lm_head.weight']

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi3
    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
    def get_input_embeddings(self):
        return self.model.embed_tokens

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
    def get_output_embeddings(self):
        return self.lm_head

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
    def set_decoder(self, decoder):
        self.model = decoder

    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
    def get_decoder(self):
        return self.model

    # Ignore copy
    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked); the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("microsoft/phi-3-mini-4k-instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-3-mini-4k-instruct")

        >>> prompt = "This is an example script ."
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'This is an example script .\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            phantom_position=phantom_position,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    # Copied from transformers.models.persimmon.modeling_persimmon.PersimmonForCausalLM.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
            # input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get('position_ids', None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}

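        # Phantom change: `phantom_position` is threaded through the generation kwargs so
        # that every decoding step receives the phantom token mask.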
        model_inputs.update(
            {
                'position_ids': position_ids,
                'past_key_values': past_key_values,
                'phantom_position': kwargs.get('phantom_position'),
                'use_cache': kwargs.get('use_cache'),
                'attention_mask': attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    # Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM._reorder_cache
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


@add_start_docstrings(
    """
    The [`Phi3Model`] with a sequence classification head on top (linear layer).

    [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in
    each row of the batch).
    """,
    PHI3_START_DOCSTRING,
)
# Copied from transformers.models.llama.modeling_llama.LlamaForSequenceClassification with Llama->Phi3, LLAMA->PHI3, self.transformer->self.model, transformer_outputs->model_outputs
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        model_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = model_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
                sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
                sequence_lengths = sequence_lengths % input_ids.shape[-1]
                sequence_lengths = sequence_lengths.to(logits.device)
            else:
                sequence_lengths = -1

pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
1559 |
+
|
1560 |
+
loss = None
|
1561 |
+
if labels is not None:
|
1562 |
+
labels = labels.to(logits.device)
|
1563 |
+
if self.config.problem_type is None:
|
1564 |
+
if self.num_labels == 1:
|
1565 |
+
self.config.problem_type = 'regression'
|
1566 |
+
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
1567 |
+
self.config.problem_type = 'single_label_classification'
|
1568 |
+
else:
|
1569 |
+
self.config.problem_type = 'multi_label_classification'
|
1570 |
+
|
1571 |
+
if self.config.problem_type == 'regression':
|
1572 |
+
loss_fct = MSELoss()
|
1573 |
+
if self.num_labels == 1:
|
1574 |
+
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
1575 |
+
else:
|
1576 |
+
loss = loss_fct(pooled_logits, labels)
|
1577 |
+
elif self.config.problem_type == 'single_label_classification':
|
1578 |
+
loss_fct = CrossEntropyLoss()
|
1579 |
+
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
1580 |
+
elif self.config.problem_type == 'multi_label_classification':
|
1581 |
+
loss_fct = BCEWithLogitsLoss()
|
1582 |
+
loss = loss_fct(pooled_logits, labels)
|
1583 |
+
if not return_dict:
|
1584 |
+
output = (pooled_logits,) + model_outputs[1:]
|
1585 |
+
return ((loss,) + output) if loss is not None else output
|
1586 |
+
|
1587 |
+
return SequenceClassifierOutputWithPast(
|
1588 |
+
loss=loss,
|
1589 |
+
logits=pooled_logits,
|
1590 |
+
past_key_values=model_outputs.past_key_values,
|
1591 |
+
hidden_states=model_outputs.hidden_states,
|
1592 |
+
attentions=model_outputs.attentions,
|
1593 |
+
)
|
1594 |
+
|
1595 |
+
|
1596 |
+
@add_start_docstrings(
|
1597 |
+
"""
|
1598 |
+
[`Phi3Model`] with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
|
1599 |
+
Named-Entity-Recognition (NER) tasks.
|
1600 |
+
""",
|
1601 |
+
PHI3_START_DOCSTRING,
|
1602 |
+
)
|
1603 |
+
# Copied from transformers.models.mpt.modeling_mpt.MptForTokenClassification with Mpt->Phi3,MPT->PHI3,self.transformer->self.model,transformer_outputs->model_outputs
|
1604 |
+
class Phi3ForTokenClassification(Phi3PreTrainedModel):
|
1605 |
+
def __init__(self, config: Phi3Config):
|
1606 |
+
super().__init__(config)
|
1607 |
+
self.num_labels = config.num_labels
|
1608 |
+
|
1609 |
+
self.model = Phi3Model(config)
|
1610 |
+
if hasattr(config, 'classifier_dropout') and config.classifier_dropout is not None:
|
1611 |
+
classifier_dropout = config.classifier_dropout
|
1612 |
+
elif hasattr(config, 'hidden_dropout') and config.hidden_dropout is not None:
|
1613 |
+
classifier_dropout = config.hidden_dropout
|
1614 |
+
else:
|
1615 |
+
classifier_dropout = 0.1
|
1616 |
+
self.dropout = nn.Dropout(classifier_dropout)
|
1617 |
+
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
|
1618 |
+
|
1619 |
+
# Initialize weights and apply final processing
|
1620 |
+
self.post_init()
|
1621 |
+
|
1622 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
1623 |
+
@add_code_sample_docstrings(
|
1624 |
+
checkpoint=_CHECKPOINT_FOR_DOC,
|
1625 |
+
output_type=TokenClassifierOutput,
|
1626 |
+
config_class=_CONFIG_FOR_DOC,
|
1627 |
+
)
|
1628 |
+
def forward(
|
1629 |
+
self,
|
1630 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1631 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
|
1632 |
+
attention_mask: Optional[torch.Tensor] = None,
|
1633 |
+
inputs_embeds: Optional[torch.Tensor] = None,
|
1634 |
+
labels: Optional[torch.Tensor] = None,
|
1635 |
+
use_cache: Optional[bool] = None,
|
1636 |
+
output_attentions: Optional[bool] = None,
|
1637 |
+
output_hidden_states: Optional[bool] = None,
|
1638 |
+
return_dict: Optional[bool] = None,
|
1639 |
+
**deprecated_arguments,
|
1640 |
+
) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
|
1641 |
+
r"""
|
1642 |
+
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
1643 |
+
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
1644 |
+
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
1645 |
+
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
1646 |
+
"""
|
1647 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
1648 |
+
|
1649 |
+
model_outputs = self.model(
|
1650 |
+
input_ids,
|
1651 |
+
past_key_values=past_key_values,
|
1652 |
+
attention_mask=attention_mask,
|
1653 |
+
inputs_embeds=inputs_embeds,
|
1654 |
+
use_cache=use_cache,
|
1655 |
+
output_attentions=output_attentions,
|
1656 |
+
output_hidden_states=output_hidden_states,
|
1657 |
+
return_dict=return_dict,
|
1658 |
+
)
|
1659 |
+
|
1660 |
+
hidden_states = model_outputs[0]
|
1661 |
+
hidden_states = self.dropout(hidden_states)
|
1662 |
+
logits = self.classifier(hidden_states)
|
1663 |
+
|
1664 |
+
loss = None
|
1665 |
+
if labels is not None:
|
1666 |
+
# move labels to correct device to enable model parallelism
|
1667 |
+
labels = labels.to(logits.device)
|
1668 |
+
batch_size, seq_length = labels.shape
|
1669 |
+
loss_fct = CrossEntropyLoss()
|
1670 |
+
loss = loss_fct(
|
1671 |
+
logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
|
1672 |
+
)
|
1673 |
+
|
1674 |
+
if not return_dict:
|
1675 |
+
output = (logits,) + model_outputs[2:]
|
1676 |
+
return ((loss,) + output) if loss is not None else output
|
1677 |
+
|
1678 |
+
return TokenClassifierOutput(
|
1679 |
+
loss=loss,
|
1680 |
+
logits=logits,
|
1681 |
+
hidden_states=model_outputs.hidden_states,
|
1682 |
+
attentions=model_outputs.attentions,
|
1683 |
+
)
|
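A note on the pooling rule documented above: `Phi3ForSequenceClassification` scores each sequence from the hidden state of its last non-padding token, found as the index just before the first `pad_token_id` (the modulo keeps the index in range when a row contains no padding at all). A minimal standalone sketch of that index arithmetic, using made-up tensors rather than the model classes:

    import torch

    pad_token_id = 0
    input_ids = torch.tensor([[5, 7, 9, 0, 0],    # 3 real tokens -> last index 2
                              [4, 6, 8, 3, 2]])   # no padding    -> argmax 0, wraps to 4
    sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
    sequence_lengths = sequence_lengths % input_ids.shape[-1]
    logits = torch.randn(2, 5, 3)                       # (batch, seq, num_labels)
    pooled = logits[torch.arange(2), sequence_lengths]  # (batch, num_labels)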
model/arch_7b/configuration_intern_vit.py
ADDED
@@ -0,0 +1,114 @@
import os
from typing import Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)


class InternVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
    instantiate a vision encoder according to the specified arguments, defining the model architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            Number of color channels in the input images (e.g., 3 for RGB).
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        qkv_bias (`bool`, *optional*, defaults to `False`):
            Whether to add a bias to the queries and values in the self-attention layers.
        hidden_size (`int`, *optional*, defaults to 3200):
            Dimensionality of the encoder layers and the pooler layer.
        num_attention_heads (`int`, *optional*, defaults to 25):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 12800):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        qk_normalization (`bool`, *optional*, defaults to `True`):
            Whether to normalize the queries and keys in the self-attention layers.
        num_hidden_layers (`int`, *optional*, defaults to 48):
            Number of hidden layers in the Transformer encoder.
        use_flash_attn (`bool`, *optional*, defaults to `True`):
            Whether to use flash attention mechanism.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            Dropout rate for stochastic depth.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 0.1):
            A factor for layer scale.
    """

    model_type = 'intern_vit_300m'

    def __init__(
        self,
        num_channels=3,
        patch_size=14,
        image_size=224,
        qkv_bias=False,
        hidden_size=3200,
        num_attention_heads=25,
        intermediate_size=12800,
        qk_normalization=True,
        num_hidden_layers=48,
        use_flash_attn=True,
        hidden_act='gelu',
        norm_type='rms_norm',
        layer_norm_eps=1e-6,
        dropout=0.0,
        drop_path_rate=0.0,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.drop_path_rate = drop_path_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.norm_type = norm_type
        self.qkv_bias = qkv_bias
        self.qk_normalization = qk_normalization
        self.use_flash_attn = use_flash_attn

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if 'vision_config' in config_dict:
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)
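The patch arithmetic implied by these defaults: a 224-pixel image cut into 14-pixel patches gives a 16x16 grid, i.e. 256 patch tokens plus one class token. A minimal sketch (the import path follows this repo's layout; adjust it if loading the file differently):

    from model.arch_7b.configuration_intern_vit import InternVisionConfig

    config = InternVisionConfig()                       # image_size=224, patch_size=14
    num_patches = (config.image_size // config.patch_size) ** 2
    print(num_patches, num_patches + 1)                 # 256 patches, 257 positions incl. class token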
model/arch_7b/configuration_internlm2.py
ADDED
@@ -0,0 +1,150 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/configuration_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" InternLM2 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


# Modified from transformers.model.llama.configuration_llama.LlamaConfig
class InternLM2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
    an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 103168):
            Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented
            by the `input_ids` passed when calling [`InternLM2Model`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
    """
    model_type = 'internlm2'
    _auto_class = 'AutoConfig'

    def __init__(  # pylint: disable=W0102
        self,
        vocab_size=103168,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act='silu',
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        bias=True,
        rope_theta=10000,
        rope_scaling=None,
        attn_implementation='eager',
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.bias = bias

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        self.attn_implementation = attn_implementation
        if self.attn_implementation is None:
            self.attn_implementation = 'eager'
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}'
            )
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
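`_rope_scaling_validation` runs inside `__init__`, so a malformed `rope_scaling` fails at construction time. Note the `isinstance(..., float)` check: an integer factor such as `2` is rejected, it must be `2.0`. A minimal sketch of the passing and failing cases (values are illustrative; the import path follows this repo's layout):

    from model.arch_7b.configuration_internlm2 import InternLM2Config

    ok = InternLM2Config(rope_scaling={'type': 'dynamic', 'factor': 2.0})  # validates
    try:
        InternLM2Config(rope_scaling={'type': 'ntk', 'factor': 2.0})       # unsupported type
    except ValueError as err:
        print(err)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], ...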
model/arch_7b/configuration_phantom.py
ADDED
@@ -0,0 +1,82 @@
import copy

from transformers import LlamaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig
from .configuration_internlm2 import InternLM2Config

logger = logging.get_logger(__name__)


class PhantomConfig(PretrainedConfig):
    model_type = 'phantom'
    is_composition = True

    def __init__(
            self,
            vision_config=None,
            llm_config=None,
            use_backbone_lora=0,
            use_llm_lora=0,
            force_image_size=None,
            downsample_ratio=0.5,
            template=None,
            dynamic_image_size=False,
            use_thumbnail=False,
            min_dynamic_patch=1,
            max_dynamic_patch=6,
            **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')

        if llm_config is None:
            llm_config = {}
            logger.info('llm_config is None. Initializing the LlamaConfig with default values (`LlamaConfig`).')

        self.vision_config = InternVisionConfig(**vision_config)
        if llm_config['architectures'][0] == 'LlamaForCausalLM':
            self.llm_config = LlamaConfig(**llm_config)
        elif llm_config['architectures'][0] == 'InternLM2ForCausalLM':
            self.llm_config = InternLM2Config(**llm_config)
        else:
            raise ValueError('Unsupported architecture: {}'.format(llm_config['architectures'][0]))
        self.use_backbone_lora = use_backbone_lora
        self.use_llm_lora = use_llm_lora
        self.force_image_size = force_image_size
        self.downsample_ratio = downsample_ratio
        self.template = template
        self.dynamic_image_size = dynamic_image_size
        self.use_thumbnail = use_thumbnail
        self.min_dynamic_patch = min_dynamic_patch
        self.max_dynamic_patch = max_dynamic_patch

        logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
        logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Overrides the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['llm_config'] = self.llm_config.to_dict()
        output['model_type'] = self.__class__.model_type
        output['use_backbone_lora'] = self.use_backbone_lora
        output['use_llm_lora'] = self.use_llm_lora
        output['force_image_size'] = self.force_image_size
        output['downsample_ratio'] = self.downsample_ratio
        output['template'] = self.template
        output['dynamic_image_size'] = self.dynamic_image_size
        output['use_thumbnail'] = self.use_thumbnail
        output['min_dynamic_patch'] = self.min_dynamic_patch
        output['max_dynamic_patch'] = self.max_dynamic_patch

        return output
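`PhantomConfig` is a composition config: it dispatches `llm_config` to `LlamaConfig` or `InternLM2Config` based on `llm_config['architectures'][0]`, so that key must be present; note the `llm_config is None` fallback sets an empty dict and would then fail on the `architectures` lookup. A minimal sketch with illustrative values:

    from model.arch_7b.configuration_phantom import PhantomConfig

    config = PhantomConfig(
        vision_config={},                                        # InternVisionConfig defaults
        llm_config={'architectures': ['InternLM2ForCausalLM']},  # selects InternLM2Config
    )
    d = config.to_dict()                                         # fully nested, serializable
    assert d['llm_config']['architectures'] == ['InternLM2ForCausalLM']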
model/arch_7b/modeling_intern_vit.py
ADDED
@@ -0,0 +1,430 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import rearrange
from timm.models.layers import DropPath
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutput,
                                           BaseModelOutputWithPooling)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from .configuration_intern_vit import InternVisionConfig

try:
    try:  # v1
        from flash_attn.flash_attn_interface import \
            flash_attn_unpadded_qkvpacked_func
    except:  # v2
        from flash_attn.flash_attn_interface import \
            flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func

    from flash_attn.bert_padding import pad_input, unpad_input

    has_flash_attn = True
except:
    print('FlashAttention is not installed.')
    has_flash_attn = False

logger = logging.get_logger(__name__)


class FlashAttention(nn.Module):
    """Implement the scaled dot product attention with softmax.
    Arguments
    ---------
        softmax_scale: The temperature to use for the softmax attention.
                       (default: 1/sqrt(d_keys) where d_keys is computed at
                       runtime)
        attention_dropout: The dropout rate to apply to the attention
                           (default: 0.0)
    """

    def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
        super().__init__()
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
                max_s=None, need_weights=False):
        """Implements the multihead softmax attention.
        Arguments
        ---------
            qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
                if unpadded: (nnz, 3, h, d)
            key_padding_mask: a bool tensor of shape (B, S)
        """
        assert not need_weights
        assert qkv.dtype in [torch.float16, torch.bfloat16]
        assert qkv.is_cuda

        if cu_seqlens is None:
            batch_size = qkv.shape[0]
            seqlen = qkv.shape[1]
            if key_padding_mask is None:
                qkv = rearrange(qkv, 'b s ... -> (b s) ...')
                max_s = seqlen
                cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
                                          device=qkv.device)
                output = flash_attn_unpadded_qkvpacked_func(
                    qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
            else:
                nheads = qkv.shape[-2]
                x = rearrange(qkv, 'b s three h d -> b s (three h d)')
                x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
                x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
                output_unpad = flash_attn_unpadded_qkvpacked_func(
                    x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                    softmax_scale=self.softmax_scale, causal=causal
                )
                output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
                                             indices, batch_size, seqlen),
                                   'b s (h d) -> b s h d', h=nheads)
        else:
            assert max_s is not None
            output = flash_attn_unpadded_qkvpacked_func(
                qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
                softmax_scale=self.softmax_scale, causal=causal
            )

        return output, None


class InternRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)


try:
    from apex.normalization import FusedRMSNorm

    InternRMSNorm = FusedRMSNorm  # noqa

    logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
except ImportError:
    # using the normal InternRMSNorm
    pass
except Exception:
    logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
    pass


NORM2FN = {
    'rms_norm': InternRMSNorm,
    'layer_norm': nn.LayerNorm,
}


class InternVisionEmbeddings(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(
            torch.randn(1, 1, self.embed_dim),
        )

        self.patch_embedding = nn.Conv2d(
            in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1

        self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))

    def _get_pos_embed(self, pos_embed, H, W):
        target_dtype = pos_embed.dtype
        pos_embed = pos_embed.float().reshape(
            1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
        pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
            reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
        return pos_embed

    def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [*, channel, width, height]
        batch_size, _, height, width = patch_embeds.shape
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
        class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        position_embedding = torch.cat([
            self.position_embedding[:, :1, :],
            self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
        ], dim=1)
        embeddings = embeddings + position_embedding.to(target_dtype)
        return embeddings


class InternAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.use_flash_attn = config.use_flash_attn and has_flash_attn
        if config.use_flash_attn and not has_flash_attn:
            print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
                f' {self.num_heads}).'
            )

        self.scale = self.head_dim ** -0.5
        self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
        self.attn_drop = nn.Dropout(config.attention_dropout)
        self.proj_drop = nn.Dropout(config.dropout)

        self.qk_normalization = config.qk_normalization

        if self.qk_normalization:
            self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)

        if self.use_flash_attn:
            self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
        self.proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _naive_attn(self, x):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        if self.qk_normalization:
            B_, H_, N_, D_ = q.shape
            q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
            k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)

        attn = ((q * self.scale) @ k.transpose(-2, -1))
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
        qkv = self.qkv(x)
        qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)

        if self.qk_normalization:
            q, k, v = qkv.unbind(2)
            q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
            k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
            qkv = torch.stack([q, k, v], dim=2)

        context, _ = self.inner_attn(
            qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
        )
        outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
        outs = self.proj_drop(outs)
        return outs

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
        return x


class InternMLP(nn.Module):
    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        self.act = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class InternVisionEncoderLayer(nn.Module):
    def __init__(self, config: InternVisionConfig, drop_path_rate: float):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.norm_type = config.norm_type

        self.attn = InternAttention(config)
        self.mlp = InternMLP(config)
        self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
        self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)

        self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
        self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
        self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()

    def forward(
            self,
            hidden_states: torch.Tensor,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
        """
        hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states)) * self.ls1)

        hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states)) * self.ls2)

        return hidden_states


class InternVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`InternEncoderLayer`].

    Args:
        config (`InternConfig`):
            The corresponding vision configuration for the `InternEncoder`.
    """

    def __init__(self, config: InternVisionConfig):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
        self.layers = nn.ModuleList([
            InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
            self,
            inputs_embeds,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Embedded representation of the inputs. Should be float, not int tokens.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        hidden_states = inputs_embeds

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    encoder_layer,
                    hidden_states)
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                )
            hidden_states = layer_outputs

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states
        )


class InternVisionModel(PreTrainedModel):
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    config_class = InternVisionConfig
    _no_split_modules = ['InternVisionEncoderLayer']

    def __init__(self, config: InternVisionConfig):
        super().__init__(config)
        self.config = config

        self.embeddings = InternVisionEmbeddings(config)
        self.encoder = InternVisionEncoder(config)

    def resize_pos_embeddings(self, old_size, new_size, patch_size):
        pos_emb = self.embeddings.position_embedding
        _, num_positions, embed_dim = pos_emb.shape
        cls_emb = pos_emb[:, :1, :]
        pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
        pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
        pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
        pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
        self.embeddings.position_embedding = nn.Parameter(pos_emb)
        self.embeddings.image_size = new_size
        logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
            self,
            pixel_values: Optional[torch.FloatTensor] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
            pixel_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None and pixel_embeds is None:
            raise ValueError('You have to specify pixel_values or pixel_embeds')

        if pixel_embeds is not None:
            hidden_states = pixel_embeds
        else:
            if len(pixel_values.shape) == 4:
                hidden_states = self.embeddings(pixel_values)
            else:
                raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
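The embeddings module above is what makes this encoder resolution-agnostic: `_get_pos_embed` bicubically resizes the position grid learned at 224x224 to whatever patch grid the input produces. A minimal sketch of just that resize, outside the model (shapes are illustrative):

    import torch
    import torch.nn.functional as F

    embed_dim, grid = 64, 16                                  # trained at 16x16 patches (224/14)
    pos = torch.randn(1, grid * grid, embed_dim)              # learned patch position embeddings
    H = W = 32                                                # e.g. a 448x448 input -> 32x32 patches
    pos = pos.reshape(1, grid, grid, -1).permute(0, 3, 1, 2)  # (1, C, 16, 16)
    pos = F.interpolate(pos, size=(H, W), mode='bicubic', align_corners=False)
    pos = pos.reshape(1, -1, H * W).permute(0, 2, 1)          # (1, 1024, C), ready to add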
model/arch_7b/modeling_internlm2.py
ADDED
@@ -0,0 +1,1487 @@
1 |
+
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
|
2 |
+
#
|
3 |
+
# This code is based on transformers/src/transformers/models/llama/modeling_llama.py
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
""" PyTorch InternLM2 model."""
|
17 |
+
import math
|
18 |
+
import queue
|
19 |
+
import threading
|
20 |
+
import warnings
|
21 |
+
from typing import List, Optional, Tuple, Union
|
22 |
+
|
23 |
+
import torch
|
24 |
+
import torch.nn.functional as F
|
25 |
+
import torch.utils.checkpoint
|
26 |
+
from einops import rearrange
|
27 |
+
from torch import nn
|
28 |
+
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
29 |
+
from transformers.activations import ACT2FN
|
30 |
+
from transformers.modeling_outputs import (BaseModelOutputWithPast,
|
31 |
+
CausalLMOutputWithPast,
|
32 |
+
SequenceClassifierOutputWithPast)
|
33 |
+
from transformers.modeling_utils import PreTrainedModel
|
34 |
+
from transformers.utils import (add_start_docstrings,
|
35 |
+
add_start_docstrings_to_model_forward, logging,
|
36 |
+
replace_return_docstrings)
|
37 |
+
|
38 |
+
try:
|
39 |
+
from transformers.generation.streamers import BaseStreamer
|
40 |
+
except: # noqa # pylint: disable=bare-except
|
41 |
+
BaseStreamer = None
|
42 |
+
|
43 |
+
from .configuration_internlm2 import InternLM2Config
|
44 |
+
|
45 |
+
# Phantom
|
46 |
+
from utils.utils import *
|
47 |
+
|
48 |
+
logger = logging.get_logger(__name__)
|
49 |
+
|
50 |
+
_CONFIG_FOR_DOC = 'InternLM2Config'
|
51 |
+
|
52 |
+
flash_attn_func, flash_attn_varlen_func = None, None
|
53 |
+
pad_input, index_first_axis, unpad_input = None, None, None
|
54 |
+
try:
|
55 |
+
from flash_attn import flash_attn_func as _flash_attn_func
|
56 |
+
from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
|
57 |
+
from flash_attn.bert_padding import index_first_axis as _index_first_axis
|
58 |
+
from flash_attn.bert_padding import pad_input as _pad_input
|
59 |
+
from flash_attn.bert_padding import unpad_input as _unpad_input
|
60 |
+
|
61 |
+
flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
|
62 |
+
pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
|
63 |
+
has_flash_attn = True
|
64 |
+
except:
|
65 |
+
has_flash_attn = False
|
66 |
+
|
67 |
+
|
68 |
+
def _import_flash_attn():
|
69 |
+
global flash_attn_func, flash_attn_varlen_func
|
70 |
+
global pad_input, index_first_axis, unpad_input
|
71 |
+
try:
|
72 |
+
from flash_attn import flash_attn_func as _flash_attn_func
|
73 |
+
from flash_attn import \
|
74 |
+
flash_attn_varlen_func as _flash_attn_varlen_func
|
75 |
+
from flash_attn.bert_padding import \
|
76 |
+
index_first_axis as _index_first_axis
|
77 |
+
from flash_attn.bert_padding import pad_input as _pad_input
|
78 |
+
from flash_attn.bert_padding import unpad_input as _unpad_input
|
79 |
+
flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
|
80 |
+
pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
|
81 |
+
except ImportError:
|
82 |
+
raise ImportError('flash_attn is not installed.')
|
83 |
+
|
84 |
+
|
85 |
+
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
86 |
+
def _get_unpad_data(attention_mask):
|
87 |
+
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
88 |
+
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
89 |
+
max_seqlen_in_batch = seqlens_in_batch.max().item()
|
90 |
+
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
|
91 |
+
return (
|
92 |
+
indices,
|
93 |
+
cu_seqlens,
|
94 |
+
max_seqlen_in_batch,
|
95 |
+
)
|
96 |
+
|
97 |
+
|
98 |
+
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
|
99 |
+
def _make_causal_mask(
|
100 |
+
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
|
101 |
+
):
|
102 |
+
"""
|
103 |
+
Make causal mask used for bi-directional self-attention.
|
104 |
+
"""
|
105 |
+
bsz, tgt_len = input_ids_shape
|
106 |
+
mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
|
107 |
+
mask_cond = torch.arange(mask.size(-1), device=device)
|
108 |
+
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
|
109 |
+
mask = mask.to(dtype)
|
110 |
+
|
111 |
+
if past_key_values_length > 0:
|
112 |
+
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
|
113 |
+
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
|
114 |
+
|
115 |
+
|
116 |
+
# Copied from transformers.models.bart.modeling_bart._expand_mask
|
117 |
+
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
|
118 |
+
"""
|
119 |
+
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
120 |
+
"""
|
121 |
+
bsz, src_len = mask.size()
|
122 |
+
tgt_len = tgt_len if tgt_len is not None else src_len
|
123 |
+
|
124 |
+
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
|
125 |
+
|
126 |
+
inverted_mask = 1.0 - expanded_mask
|
127 |
+
|
128 |
+
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)


# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
class InternLM2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        InternLM2RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
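
# In math terms (comment only): y = w * x / sqrt(mean(x^2) + eps). Unlike LayerNorm,
# RMSNorm subtracts no mean and has no bias; the reduction runs in fp32 and the result is
# cast back to the input dtype.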


# Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
class InternLM2RotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer('inv_freq', inv_freq, persistent=False)

        # Build here to make `torch.jit.trace` work.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


# Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)


# Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
    """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
    Credits to the Reddit users /u/bloc97 and /u/emozilla.
    """

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer('inv_freq', inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)

        freqs = torch.einsum('i,j->ij', t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
        self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
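
# How the two long-context variants differ (comment only):
#   linear scaling: positions are compressed, t' = t / scaling_factor, squeezing a sequence
#                   of length s*L into the trained range L.
#   dynamic NTK   : the RoPE base is enlarged instead, once seq_len exceeds the trained
#                   window: base' = base * (s * seq_len / L - (s - 1)) ** (d / (d - 2)),
#                   with s = scaling_factor, L = max_position_embeddings, d = rotary dim,
#                   which mostly stretches the low-frequency components while leaving the
#                   high-frequency ones close to their trained values.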


# Copied from transformers.model.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


# Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors."""
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
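
# Equivalently (comment only): with the (x1, x2) half-split used by rotate_half, each
# paired coordinate is rotated by the position-dependent angle m*theta:
#   q1' = q1 * cos(m * theta) - q2 * sin(m * theta)
#   q2' = q2 * cos(m * theta) + q1 * sin(m * theta)
# so q * cos + rotate_half(q) * sin is a plain 2-D rotation applied per frequency pair.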


class InternLM2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))

        return down_proj
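
# This is the gated ("SwiGLU"-style) feed-forward used by LLaMA-family models
# (comment only): down_proj = W2(act(W1 x) * W3 x), with the activation taken from
# ACT2FN[config.hidden_act], typically SiLU for InternLM2 configs.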


# Copied from transformers.model.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
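
# Shape check (comment only): repeat_kv(torch.randn(1, 2, 4, 8), n_rep=3).shape
# -> torch.Size([1, 6, 4, 8]); each of the 2 key/value heads is shared by 3 query heads,
# which is how grouped-query attention broadcasts the KV cache in the eager path below.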


# Modified from transformers.model.llama.modeling_llama.LlamaAttention
class InternLM2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
                f' and `num_heads`: {self.num_heads}).'
            )

        self.wqkv = nn.Linear(
            self.hidden_size,
            (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
            bias=config.bias,
        )

        self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
        self._init_rope()

        """
        Phantom
        """
        # Phantom Init
        self.turn_on_phantom = True
        self.xattn_query_phantom = XAttention(self.head_dim)
        self.xattn_key_phantom = XAttention(self.head_dim)
        self.xattn_value_phantom = XAttention(self.head_dim)
        self.gating_phantom_1 = nn.Linear(self.head_dim, 1)
        self.gating_phantom_2 = nn.Linear(self.head_dim, 1)

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = InternLM2RotaryEmbedding(
                self.head_dim,
                max_position_embeddings=self.max_position_embeddings,
                base=self.config.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling['type']
            scaling_factor = self.config.rope_scaling['factor']
            if scaling_type == 'dynamic':
                self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            elif scaling_type == 'linear':
                self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
                    self.head_dim,
                    max_position_embeddings=self.max_position_embeddings,
                    base=self.config.rope_theta,
                    scaling_factor=scaling_factor,
                )
            else:
                raise ValueError("Currently we only support rotary embedding's type being 'dynamic' or 'linear'.")
        return self.rotary_emb

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
                f' {attn_weights.size()}'
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
                f' {attn_output.size()}'
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
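
# Layout of the packed projection (comment only): wqkv emits
# (num_heads + 2 * num_key_value_heads) * head_dim features, viewed as
# [batch, seq, num_key_value_heads, num_key_value_groups + 2, head_dim]; the first
# num_key_value_groups slices per group are query heads, the last two are the shared key
# and value head. For example, 16 query heads with 8 KV heads gives gs = 2 + 2 and a
# 32 * head_dim output. The xattn_*_phantom / gating_phantom_* modules created in
# __init__ above are only exercised by the flash-attention subclass below.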


# Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
class InternLM2FlashAttention2(InternLM2Attention):
    """
    InternLM2 flash attention module. This module inherits from `InternLM2Attention`, as the weights of the module
    stay untouched. The only required change is on the forward pass, where it needs to correctly call the public API
    of flash attention and deal with padding tokens in case the input contains any of them.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # InternLM2FlashAttention2 attention does not support output_attentions
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

            # overwrite attention_mask with padding_mask
            attention_mask = kwargs.pop('padding_mask')

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        qkv_states = self.wqkv(hidden_states)

        qkv_states = rearrange(
            qkv_states,
            'b q (h gs d) -> b q h gs d',
            gs=2 + self.num_key_value_groups,
            d=self.head_dim,
        )

        query_states = qkv_states[..., : self.num_key_value_groups, :]
        query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
        key_states = qkv_states[..., -2, :]
        value_states = qkv_states[..., -1, :]

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, q_len, phantom_position
        )
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.wo(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, phantom_position, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
        first unpad the input, then compute the attention scores and pad the final attention scores.

        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            phantom_position (`torch.BoolTensor`):
                Boolean mask marking the phantom-token positions within each sequence.
            dropout (`float`, *optional*):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """

        """
        Phantom
        """
        if self.turn_on_phantom:

            # [Important] softmax_scale is pinned to the original head_dim here, before the
            # phantom concatenation below doubles the last dimension.
            softmax_scale = 1 / math.sqrt(query_states.shape[-1])

            query_states_phantom = []
            key_states_phantom = []
            value_states_phantom = []
            for index, pos in enumerate(phantom_position):
                if query_states.shape[1] > 1:
                    query_states_phantom.append(query_states[index][pos])
                    key_states_phantom.append(key_states[index][pos])
                    value_states_phantom.append(value_states[index][pos])

                    # saving phantom qkv for inference
                    self.query_states_phantom = query_states_phantom
                    self.key_states_phantom = key_states_phantom
                    self.value_states_phantom = value_states_phantom

            # phantom qkv: list to tensor
            query_states_phantom = torch.stack(self.query_states_phantom)
            key_states_phantom = torch.stack(self.key_states_phantom)
            value_states_phantom = torch.stack(self.value_states_phantom)

            # phantom qkv: 1 -> N (sequence)
            query_states_phantom = self.xattn_query_phantom(q=query_states, k=query_states_phantom, v=query_states_phantom)
            key_states_phantom = self.xattn_key_phantom(q=key_states, k=key_states_phantom, v=key_states_phantom)
            value_states_phantom = self.xattn_value_phantom(q=value_states, k=value_states_phantom, v=value_states_phantom, is_residual=True)

            # concat original qkv and phantom qkv for hidden-dimension / heads
            query_states = torch.cat([query_states, query_states_phantom], dim=3)
            key_states = torch.cat([key_states, key_states_phantom], dim=3)
            value_states = torch.cat([value_states, value_states_phantom], dim=3)

        # Contains at least one padding token in the sequence
        causal = self.is_causal and query_length != 1
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        """
        Phantom
        """
        if self.turn_on_phantom:
            half_dim = attn_output.shape[-1] // 2
            half1_o = attn_output[..., :half_dim]
            half2_o = attn_output[..., half_dim:]
            weight1 = self.gating_phantom_1(half1_o)
            weight2 = self.gating_phantom_2(half2_o)
            weight_norm = weight1.exp() / (weight1.exp() + weight2.exp())
            attn_output = weight_norm * half1_o + (1 - weight_norm) * half2_o
        return attn_output

    def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )

        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q.to(torch.int64),
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )
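
# Phantom in one paragraph (comment only, summarizing the code above): during prefill, the
# q/k/v vectors at the phantom-token positions are gathered and cached on the module; each
# of q, k and v then cross-attends (XAttention) over its cached phantom states, and the
# result is concatenated with the original states along head_dim, temporarily doubling it.
# After flash attention, the output is split back into its two halves and fused by a
# learned two-way softmax gate (gating_phantom_1/2), restoring the original head_dim
# before the wo projection.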


INTERNLM2_ATTENTION_CLASSES = {
    'eager': InternLM2Attention,
    'flash_attention_2': InternLM2FlashAttention2,
}
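
# The decoder layer below picks an implementation from this table via
# config.attn_implementation; InternLM2Model.__init__ forces 'eager' when the flash_attn
# package could not be imported (see has_flash_attn above).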


# Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
class InternLM2DecoderLayer(nn.Module):
    def __init__(self, config: InternLM2Config):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)

        self.feed_forward = InternLM2MLP(config)
        self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        phantom_position: torch.BoolTensor = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if 'padding_mask' in kwargs:
            warnings.warn(
                'Passing `padding_mask` is deprecated and will be removed in v4.37. '
                'Please make sure to use `attention_mask` instead.'
            )

        residual = hidden_states

        hidden_states = self.attention_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.attention(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            phantom_position=phantom_position,
            output_attentions=output_attentions,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.ffn_norm(hidden_states)
        hidden_states = self.feed_forward(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


InternLM2_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`InternLM2Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2PreTrainedModel(PreTrainedModel):
    config_class = InternLM2Config
    base_model_prefix = 'model'
    supports_gradient_checkpointing = True
    _no_split_modules = ['InternLM2DecoderLayer']
    _skip_keys_device_placement = 'past_key_values'
    _supports_flash_attn_2 = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


InternLM2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
            when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


# Modified from transformers.model.llama.modeling_llama.LlamaModel
@add_start_docstrings(
    'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
    InternLM2_START_DOCSTRING,
)
class InternLM2Model(InternLM2PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]

    Args:
        config: InternLM2Config
    """

    _auto_class = 'AutoModel'

    def __init__(self, config: InternLM2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.config = config
        if not has_flash_attn:
            self.config.attn_implementation = 'eager'
            print('Warning: Flash attention is not available, using eager attention instead.')

        self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)

        self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.tok_embeddings

    def set_input_embeddings(self, value):
        self.tok_embeddings = value

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                device=inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )

        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
                inputs_embeds.device
            )
            combined_attention_mask = (
                expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
            )

        return combined_attention_mask

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.config.attn_implementation == 'flash_attention_2':
            _import_flash_attn()

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        seq_length_with_past = seq_length
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.tok_embeddings(input_ids)

        if self.config.attn_implementation == 'flash_attention_2':
            # 2d mask is passed through the layers
            attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        else:
            if attention_mask is None:
                attention_mask = torch.ones(
                    (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
                )
            attention_mask = self._prepare_decoder_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, output_attentions, None)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(decoder_layer),
                    hidden_states,
                    attention_mask,
                    position_ids,
                    None,
                    phantom_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    phantom_position=phantom_position,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
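
# Mask plumbing differs by backend (comment only): with flash_attention_2 the model keeps
# the raw 2-D padding mask (or drops it entirely when nothing is padded) and lets
# _flash_attention_forward unpad the batch; with eager attention it expands the mask into
# the 4-D additive causal mask built by _prepare_decoder_attention_mask.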


# Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
class InternLM2ForCausalLM(InternLM2PreTrainedModel):
    _auto_class = 'AutoModelForCausalLM'

    _tied_weights_keys = ['output.weight']

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM2Model(config)
        self.vocab_size = config.vocab_size
        self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    def get_output_embeddings(self):
        return self.output

    def set_output_embeddings(self, new_embeddings):
        self.output = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        phantom_position: torch.BoolTensor = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLM2ForCausalLM

        >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
        >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            phantom_position=phantom_position,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.output(hidden_states)
        logits = logits.float()

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        device = input_ids.device if input_ids is not None else inputs_embeds.device
        output = CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
        output['logits'] = output['logits'].to(device)
        return output

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # Some generation methods already pass only the last input ID
            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        position_ids = kwargs.get('position_ids', None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}

        model_inputs.update(
            {
                'position_ids': position_ids,
                'past_key_values': past_key_values,
                'phantom_position': kwargs.get('phantom_position'),
                'use_cache': kwargs.get('use_cache'),
                'attention_mask': attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past

    def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
        if tokenizer.add_bos_token:
            prompt = ''
        else:
            prompt = tokenizer.bos_token
        if meta_instruction:
            prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
        for record in history:
            prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
        prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
        return tokenizer([prompt], return_tensors='pt')
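
    # Resulting prompt for query="Hi" with one history turn ("Hello", "Hey!") and no system
    # message (comment only; <|im_start|>/<|im_end|> are the InternLM2 chat-template tokens):
    #   <|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\nHey!<|im_end|>\n
    #   <|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n
    # The BOS token is prepended manually only when the tokenizer does not already add it.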

    @torch.no_grad()
    def chat(
        self,
        tokenizer,
        query: str,
        history: List[Tuple[str, str]] = [],
        streamer: Optional[BaseStreamer] = None,
        max_new_tokens: int = 1024,
        do_sample: bool = True,
        temperature: float = 0.8,
        top_p: float = 0.8,
        meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
        '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
        '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
        **kwargs,
    ):
        inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
        inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
        # also add end-of-assistant token in eos token id to avoid unnecessary generation
        eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
        outputs = self.generate(
            **inputs,
            streamer=streamer,
            max_new_tokens=max_new_tokens,
            do_sample=do_sample,
            temperature=temperature,
            top_p=top_p,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]) :]
        response = tokenizer.decode(outputs, skip_special_tokens=True)
        response = response.split('<|im_end|>')[0]
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(
        self,
        tokenizer,
        query: str,
        history: List[Tuple[str, str]] = [],
        max_new_tokens: int = 1024,
        do_sample: bool = True,
        temperature: float = 0.8,
        top_p: float = 0.8,
        **kwargs,
    ):
        """
        Return a generator in format: (response, history)
        Eg.
        ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
        ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
        """
        if BaseStreamer is None:
            raise ModuleNotFoundError(
                'The version of `transformers` is too low. Please make sure '
                'that you have installed `transformers>=4.28.0`.'
            )

        response_queue = queue.Queue(maxsize=20)

        class ChatStreamer(BaseStreamer):
            def __init__(self, tokenizer) -> None:
                super().__init__()
                self.tokenizer = tokenizer
                self.queue = response_queue
                self.query = query
                self.history = history
                self.response = ''
                self.cache = []
                self.received_inputs = False
                self.queue.put((self.response, history + [(self.query, self.response)]))

            def put(self, value):
                if len(value.shape) > 1 and value.shape[0] > 1:
                    raise ValueError('ChatStreamer only supports batch size 1')
                elif len(value.shape) > 1:
                    value = value[0]

                if not self.received_inputs:
                    # The first received value is input_ids, ignore here
                    self.received_inputs = True
                    return

                self.cache.extend(value.tolist())
                token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
                if token.strip() != '<|im_end|>':
                    self.response = self.response + token
                    history = self.history + [(self.query, self.response)]
                    self.queue.put((self.response, history))
                    self.cache = []
                else:
                    self.end()

            def end(self):
                self.queue.put(None)

        def stream_producer():
            return self.chat(
                tokenizer=tokenizer,
                query=query,
                streamer=ChatStreamer(tokenizer=tokenizer),
                history=history,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                temperature=temperature,
                top_p=top_p,
                **kwargs,
            )

        def consumer():
            producer = threading.Thread(target=stream_producer)
            producer.start()
            while True:
                res = response_queue.get()
                if res is None:
                    return
                yield res

        return consumer()
|
1365 |
+
|
1366 |
+
|
# Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
@add_start_docstrings(
    """
    The InternLM2 Model transformer with a sequence classification head on top (linear layer).

    [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
    as other causal models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    InternLM2_START_DOCSTRING,
)
class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = InternLM2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.tok_embeddings

    def set_input_embeddings(self, value):
        self.model.tok_embeddings = value

    @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

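For the classification head above, `sequence_lengths` picks the last real token of each right-padded row: `argmax` over `input_ids == pad_token_id` returns the index of the first pad token (or 0 when a row has no padding, which the `- 1` then wraps to the final position). A small sketch, assuming `pad_token_id = 0`:

# Sketch only: how the head locates the last non-pad position per row.
import torch
input_ids = torch.tensor([[5, 6, 7, 0, 0],
                          [8, 9, 3, 4, 0]])
sequence_lengths = torch.eq(input_ids, 0).int().argmax(-1) - 1
print(sequence_lengths)  # tensor([2, 3]) -> rows pool their logits at positions 2 and 3
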
model/arch_7b/modeling_phantom.py
ADDED
@@ -0,0 +1,176 @@
from typing import List, Optional, Tuple, Union

import torch.utils.checkpoint
from torch import nn
from transformers import GenerationConfig
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel

from .configuration_phantom import PhantomConfig
from .modeling_intern_vit import InternVisionModel
from .modeling_internlm2 import InternLM2ForCausalLM

from utils.utils import *

class PhantomForCausalLM(PreTrainedModel):
    config_class = PhantomConfig
    main_input_name = 'pixel_values'
    _supports_flash_attn_2 = True
    _no_split_modules = ['InternVisionModel', 'InternLM2DecoderLayer']

    def __init__(self, config: PhantomConfig):
        super().__init__(config)
        image_size = config.force_image_size or config.vision_config.image_size
        patch_size = config.vision_config.patch_size
        self.patch_size = patch_size
        self.template = config.template
        self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
        self.downsample_ratio = config.downsample_ratio

        self.vision_model = InternVisionModel(config.vision_config)
        self.language_model = InternLM2ForCausalLM(config.llm_config)

        vit_hidden_size = config.vision_config.hidden_size
        llm_hidden_size = config.llm_config.hidden_size

        self.vision_proj = nn.Sequential(
            nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
            nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
            nn.GELU(),
            nn.Linear(llm_hidden_size, llm_hidden_size)
        )

        # prompt rule
        self.prompt_rule = {
            "system_start": "<|im_start|>system\n",
            "system_end": "<|im_end|>",
            "user_start": "<|im_start|>user\n",
            "user_end": "<|im_end|>",
            "assistant_start": "<|im_start|>assistant\n",
            "assistant_end": "<|im_end|>",
            "test_start": "assistant\n",
            "test_end": "<|im_end|>",
            "split": "",
        }

    def eval_process(
        self,
        inputs,
        tokenizer,
        data,
        device,
    ):
        batched_image = []
        batched_qa_prompt = []
        batched_phantom_position = []
        for _input in inputs:

            # making image prompt
            if 'image' in _input.keys() and _input['image'] is not None:
                process_image = dynamic_preprocess(_input['image'].to(device))
                dynamic_process_image = torch.stack([dynamic_transform(image) for image in process_image]).to(device)
                img_token_number = dynamic_process_image.shape[0] * 256
                batched_image.append(dynamic_process_image)

            # make question and answer
            question = _input['question']

            # make instruction (qa pair) and label
            qa_prompt = make_instruction(question, data, self.prompt_rule)

            # adding image special tokens to question
            # (guard on a non-None image so img_token_number is always defined here)
            if 'image' in _input.keys() and _input['image'] is not None:
                qa_prompt = qa_prompt.replace('<image>', '<img><IMG_CONTEXT></img>')

                # add bundle image tokens if it has <image> token
                qa_prompt = add_bundle_tokens(qa_prompt, '<IMG_CONTEXT>', img_token_number)

            # phantom_position
            label = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].to(device)
            phantom_position = torch.zeros_like(label)
            phantom_position[0] = 1

            # batched processing
            batched_qa_prompt.append(qa_prompt)
            batched_phantom_position.append(phantom_position.flip(dims=[0]))

        '''For Final Outputs'''
        qa_prompts = tokenizer(batched_qa_prompt, padding='longest', return_tensors="pt", add_special_tokens=False)

        # [1] input_ids
        input_ids = qa_prompts.input_ids.to(device)

        # [2] attention_mask
        attention_mask = qa_prompts.attention_mask.to(device)

        # [3] Phantom Position
        batched_phantom_position = torch.nn.utils.rnn.pad_sequence(batched_phantom_position, batch_first=True, padding_value=0).flip(dims=[1])  # padding left

        if len(batched_image):
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "pixel_values": torch.cat(batched_image, dim=0).to(device),
                    "phantom_position": batched_phantom_position.bool()
                    }
        else:
            return {"input_ids": input_ids,
                    "attention_mask": attention_mask,
                    "phantom_position": batched_phantom_position.bool()
                    }

    def extract_feature(self, pixel_values):
        vit_embeds = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=False,
            return_dict=True).last_hidden_state
        vit_embeds = vit_embeds[:, 1:, :]

        h = w = int(vit_embeds.shape[1] ** 0.5)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
        vit_embeds = pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
        vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
        vit_embeds = self.vision_proj(vit_embeds)
        return vit_embeds

    @torch.no_grad()
    def generate(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        phantom_position: torch.BoolTensor = None,
        generation_config: Optional[GenerationConfig] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **generate_kwargs,
    ) -> torch.LongTensor:

        if pixel_values is not None:
            vit_embeds = self.extract_feature(pixel_values.to(torch.bfloat16))
            input_embeds = self.language_model.get_input_embeddings()(input_ids)
            B, N, C = input_embeds.shape
            input_embeds = input_embeds.reshape(B * N, C)

            input_ids = input_ids.reshape(B * N)
            selected = (input_ids == self.config.image_token_index)
            assert selected.sum() != 0
            input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)

            input_embeds = input_embeds.reshape(B, N, C)
        else:
            input_embeds = self.language_model.get_input_embeddings()(input_ids)

        outputs = self.language_model.generate(
            inputs_embeds=input_embeds,
            attention_mask=attention_mask,
            phantom_position=phantom_position,
            generation_config=generation_config,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            use_cache=True,
            pad_token_id=self.config.eos_token_id,
            eos_token_id=self.config.eos_token_id,
            **generate_kwargs,
        )

        return outputs

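A minimal end-to-end sketch of this class, tying `eval_process` and `generate` together. It assumes `load_model` from model/load_model.py and `output_filtering` from utils/utils.py elsewhere in this commit, and an `image_tensor` already in CHW tensor form; `data='demo'` is the path in `make_instruction` that prepends the `<image>` tag:

# Sketch only: one-image inference with PhantomForCausalLM.
import torch
from model.load_model import load_model
from utils.utils import output_filtering

model, tokenizer = load_model(size='7b')
inputs = [{'image': image_tensor, 'question': 'Describe this image.'}]  # image_tensor: CHW tensor (assumed)
with torch.inference_mode():
    batch = model.eval_process(inputs=inputs, tokenizer=tokenizer, data='demo', device='cuda:0')
    out_ids = model.generate(**batch, do_sample=False, max_new_tokens=256)
answer = output_filtering(tokenizer.batch_decode(out_ids)[0], model)
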
model/arch_7b/tokenization_internlm2.py
ADDED
@@ -0,0 +1,235 @@
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tokenization classes for InternLM."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}

PRETRAINED_VOCAB_FILES_MAP = {}


# Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
class InternLM2Tokenizer(PreTrainedTokenizer):
    """
    Construct a InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    _auto_class = 'AutoTokenizer'

    def __init__(
        self,
        vocab_file,
        unk_token='<unk>',
        bos_token='<s>',
        eos_token='</s>',
        pad_token='</s>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        add_bos_token=True,
        add_eos_token=False,
        decode_with_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.vocab_file = vocab_file
        self.add_bos_token = add_bos_token
        self.add_eos_token = add_eos_token
        self.decode_with_prefix_space = decode_with_prefix_space
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        self._no_prefix_space_tokens = None
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    def no_prefix_space_tokens(self):
        if self._no_prefix_space_tokens is None:
            vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
            self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
        return self._no_prefix_space_tokens

    @property
    def vocab_size(self):
        """Returns vocab size"""
        return self.sp_model.get_piece_size()

    @property
    def bos_token_id(self) -> Optional[int]:
        return self.sp_model.bos_id()

    @property
    def eos_token_id(self) -> Optional[int]:
        return self.sp_model.eos_id()

    def get_vocab(self):
        """Returns vocab as a dict"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """Returns a tokenized string."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def _maybe_add_prefix_space(self, tokens, decoded):
        if tokens and tokens[0] not in self.no_prefix_space_tokens:
            return ' ' + decoded
        else:
            return decoded

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        out_string = self.clean_up_tokenization(out_string)
        out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
        return out_string[1:]

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Save the vocabulary and special tokens file to a directory.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.

        Returns:
            `Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []

        output = bos_token_ids + token_ids_0

        if token_ids_1 is not None:
            output = output + token_ids_1

        if self.add_eos_token:
            output = output + [self.eos_token_id]

        return output

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
        use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.
        """
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

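A quick round trip with the tokenizer above (a sketch; `path` is assumed to be a directory containing the `tokenizer.model` file named in `VOCAB_FILES_NAMES`):

# Sketch only: encode/decode round trip with the SentencePiece-backed tokenizer.
tok = InternLM2Tokenizer.from_pretrained(path)
ids = tok('Phantom of the opera').input_ids  # BOS is prepended since add_bos_token=True
text = tok.decode(ids, skip_special_tokens=True)  # 'Phantom of the opera'
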
model/load_model.py
ADDED
@@ -0,0 +1,107 @@
import torch
import warnings
from utils.utils import *
from config import *
from transformers import AutoTokenizer
from transformers import BitsAndBytesConfig

warnings.filterwarnings(action='ignore')

def load_model(size):

    """
    model selection
    """

    # Phantom Bit
    bit_quant_skip = ["linear_q", "linear_k", "linear_v", "linear_o", "gating_phantom_1", "gating_phantom_2"]
    # Vision target modules
    if size == '7b':
        from .arch_7b.modeling_phantom import PhantomForCausalLM
        from .arch_7b.tokenization_internlm2 import InternLM2Tokenizer as PhantomTokenizer
        path = MODEL_7B
        bit_quant_skip += ["mlp1", "wqkv", "output"]

        # Loading tokenizer
        tokenizer = PhantomTokenizer.from_pretrained(path, padding_side='left')

        # bits
        bits = 8

    elif size == '3.8b':
        from .arch_3_8b.modeling_phantom import PhantomForCausalLM
        path = MODEL_3_8B
        bit_quant_skip += ["mlp1", "qkv_proj", "phantom", "lm_head"]

        # Loading tokenizer
        tokenizer = AutoTokenizer.from_pretrained(path, padding_side='left')

        # bits
        bits = 8

    elif size == '1.8b':
        from .arch_1_8b.modeling_phantom import PhantomForCausalLM
        from .arch_1_8b.tokenization_internlm2 import InternLM2Tokenizer as PhantomTokenizer
        path = MODEL_1_8B
        bit_quant_skip += ["mlp1", "wqkv", "phantom", "output"]

        # Loading tokenizer
        tokenizer = PhantomTokenizer.from_pretrained(path, padding_side='left')

        # bits
        bits = 8

    elif size == '0.5b':
        from .arch_0_5b.modeling_phantom import PhantomForCausalLM
        path = MODEL_0_5B
        bit_quant_skip += ["mlp1", "q_proj", "k_proj", "v_proj", "phantom", "lm_head"]

        # Loading tokenizer
        tokenizer = AutoTokenizer.from_pretrained(path, padding_side='left')

        # bits
        bits = 8
    else:
        raise Exception("Unsupported Size")


    # huggingface model configuration
    huggingface_config = {}

    # Bit quantization
    if bits in [4, 8]:
        huggingface_config.update(dict(
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            attn_implementation="flash_attention_2",
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=bits == 4,
                load_in_8bit=bits == 8,
                llm_int8_skip_modules=bit_quant_skip,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=torch.bfloat16,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type='nf4'
            )
        ))
    else:
        huggingface_config.update(dict(
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            attn_implementation="flash_attention_2",
        ))

    # Model Uploading
    model = PhantomForCausalLM.from_pretrained(path, **huggingface_config)

    # Parameter arrangement
    freeze_model(model)
    model.eval()

    # bfloat16/float16 conversion
    for param in model.parameters():
        if 'float32' in str(param.dtype).lower() or 'float16' in str(param.dtype).lower():
            param.data = param.data.to(torch.bfloat16)

    return model, tokenizer

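`load_model` quantizes each variant to 8-bit but uses `llm_int8_skip_modules` to keep the attention projections and the phantom gating layers in bfloat16; the per-size `bit_quant_skip` lists above control exactly which modules stay full-precision. A minimal usage sketch (assuming config.py defines the `MODEL_*` checkpoint paths):

# Sketch only: load a frozen, eval-mode 8-bit model and its tokenizer.
from model.load_model import load_model

model, tokenizer = load_model(size='7b')  # also: '3.8b', '1.8b', '0.5b'
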
requirements.txt
ADDED
@@ -0,0 +1,20 @@
transformers
bitsandbytes
accelerate
peft
pandas
pyarrow
jsonlines
wandb
einops
timm
einops_exts
sentencepiece
shortuuid
seaborn
matplotlib
scikit-learn
word2number
Rouge
gradio
spaces
utils/__init__.py
ADDED
File without changes
utils/ddp_accel.yaml
ADDED
@@ -0,0 +1,16 @@
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: MULTI_GPU
downcast_bf16: 'no'
gpu_ids: all
machine_rank: 0
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
utils/ds_accel.yaml
ADDED
@@ -0,0 +1,23 @@
compute_environment: LOCAL_MACHINE
debug: false
deepspeed_config:
  gradient_accumulation_steps: 1
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: false
  zero3_save_16bit_model: false
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
enable_cpu_affinity: false
machine_rank: 0
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false
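Both files are standard Hugging Face Accelerate configs for a single machine with one process: ddp_accel.yaml for plain multi-GPU DDP and ds_accel.yaml for DeepSpeed ZeRO stage 3. A typical invocation (assuming a training entry point such as a hypothetical train.py) is `accelerate launch --config_file utils/ds_accel.yaml train.py`.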
utils/utils.py
ADDED
@@ -0,0 +1,251 @@
import gc
import math
import torch
from config import *
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms.functional import to_pil_image
from torchvision.transforms.functional import pil_to_tensor

output_filtering = lambda x, model: x.split(model.prompt_rule["test_start"])[-1].split(model.prompt_rule["test_end"])[0].strip()

def memory_optimization():
    # memory deallocation
    gc.collect()

    # removing cache
    torch.cuda.empty_cache()

def freeze_model(model):
    for param in model.parameters():
        param.requires_grad = False

def find_special_token(string, special_token):
    start = 0
    while True:
        start = string.find(special_token, start)
        if start == -1: return
        yield start
        start += len(special_token)  # use start += 1 to find overlapping matches

def add_bundle_tokens(input_string, special_token, num):

    # number of special tokens in input_string
    num_special_tokens = len(list(find_special_token(input_string, special_token)))

    # No special token -> return the raw
    if not num_special_tokens:
        return input_string

    result = ""
    index = 0
    while index < len(input_string):
        if input_string[index:index + len(special_token)] == special_token:
            result += special_token * num
            index += len(special_token)
        else:
            result += input_string[index]
            index += 1

    assert len(list(find_special_token(result, special_token))) == num_special_tokens * num
    return result

def make_instruction_and_label(question, answer, tokenizer, device, prompt_rule, config):

    qa_prompt = make_human_string(prompt_rule["user_start"]+question+prompt_rule["user_end"],
                                  prompt_rule["assistant_start"],
                                  split=prompt_rule["split"])

    # Only QA Prompt Length
    length = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].shape[0]

    # Concat QA Prompt + Answer Length + stop token
    qa_prompt = qa_prompt + answer + prompt_rule["assistant_end"]

    # label
    label = tokenizer(qa_prompt, return_tensors='pt', add_special_tokens=False).input_ids[0].to(device)

    # phantom_position
    phantom_position = torch.zeros_like(label)
    phantom_position[0] = 1

    # add ignore index to label
    label[:length] = config.ignore_index

    return qa_prompt, label, phantom_position

def make_instruction(question, dataset, prompt_rule):

    # effectively: only the demo path prepends the image tag
    if dataset != "mathverse" and dataset != "hallusionbench" and dataset == "demo":
        question = "<image>" + question

    if dataset in ["sqa", "mmbench", "mmbench_cn", "mmbench_dev", "mmbench_cn_dev", "seed", "seed-2-plus", "qbench", "ai2d", "mmstar", "cvbench", "blink"]:
        question = question + "\nAnswer with the option's letter from the given choices directly."

    elif dataset in ["pope", "chartqa"]:
        question = question + "\nAnswer the question using a single word or phrase."

    elif dataset in ["hallusionbench"]:
        if "Please answer yes or no." not in question:
            question = question + "\nPlease answer yes or no."

    qa_prompt = make_human_string(prompt_rule["user_start"]+question+prompt_rule["user_end"],
                                  prompt_rule["assistant_start"],
                                  split=prompt_rule["split"])

    return qa_prompt

def make_human_string(*args, split):
    out = ''
    for i, arg in enumerate(args):
        out += arg
        if i != len(args)-1:
            out += split
    return out

def get_max_new_tokens(data_name):
    if data_name.lower() in ["mme", "pope", "sqa", "mmbench", "mmbench_cn",
                             "mmbench_dev", "mmbench_cn_dev", "seed", "seed-2-plus",
                             "qbench", "ai2d", "mmstar", "chartqa", "hallusionbench",
                             "cvbench", "blink"]:
        return 5
    elif data_name.lower() in ["llava", "llava_wilder", "mm-vet", "mm-vet-v2"]:
        return 1024
    elif data_name.lower() in ["mathvista", "mathverse", "visualwebbench"]:
        return 512
    else:
        raise Exception("Check Data Name!")

class ScaledDotProductAttention(nn.Module):

    def forward(self, query, key, value):
        dk = query.size()[-1]
        scores = query.matmul(key.transpose(-2, -1)) / math.sqrt(dk)
        attention = F.softmax(scores, dim=-1)
        return attention.matmul(value)

class XAttention(nn.Module):

    def __init__(self,
                 in_features,
                 activation=F.gelu,
                 eta=1e-4):
        """XAttention attention.
        :param in_features: Size of each input sample.
        :param activation: The activation after each linear transformation.
        """
        super(XAttention, self).__init__()
        self.in_features = in_features
        self.activation = activation
        self.linear_q = nn.Linear(in_features, in_features, False)
        self.linear_k = nn.Linear(in_features, in_features, False)
        self.linear_v = nn.Linear(in_features, in_features, False)
        self.linear_o = nn.Linear(in_features, in_features, False)
        self.eta = eta

    def forward(self, q, k, v, is_residual=False):
        _q, _k, _v = self.linear_q(q), self.linear_k(k), self.linear_v(v)
        if self.activation is not None:
            _q = self.activation(_q)
            _k = self.activation(_k)
            _v = self.activation(_v)
        y = ScaledDotProductAttention()(_q, _k, _v)
        y = self.linear_o(y)
        if self.activation is not None: y = self.activation(y)
        return q + self.eta*y if is_residual else self.eta*y

def pixel_shuffle(x, scale_factor=0.5):
    n, w, h, c = x.size()
    # N, W, H, C --> N, W, H * scale, C // scale
    x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
    # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
    x = x.permute(0, 2, 1, 3).contiguous()
    # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
    x = x.view(n, int(h * scale_factor), int(w * scale_factor),
               int(c / (scale_factor * scale_factor)))
    x = x.permute(0, 2, 1, 3).contiguous()
    return x

import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
dynamic_transform = build_transform(input_size=448)

def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio

def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=True):
    image = to_pil_image(image)
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # calculate the existing image aspect ratio
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        # split the image
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images

def concat_images_horizontally_with_margin(image_tensors, margin=10):
    images = [to_pil_image(xx) for xx in image_tensors]
    max_height = max(image.height for image in images)
    total_width = sum(image.width for image in images) + margin * (len(images) - 1)
    # Create a new image with a black background
    new_image = Image.new('RGB', (total_width, max_height), (0, 0, 0))

    x_offset = 0
    for image in images:
        # Calculate padding to center the image vertically
        y_offset = (max_height - image.height) // 2
        new_image.paste(image, (x_offset, y_offset))
        x_offset += image.width + margin  # Add margin after each image except the last one
    return pil_to_tensor(new_image)

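A worked shape example for `pixel_shuffle` with the `downsample_ratio = 0.5` used by PhantomForCausalLM: a 448-px tile with 14-px patches yields a 32x32 grid of ViT tokens, which the shuffle folds into a 16x16 grid with 4x the channels, i.e. the 256 tokens per tile that `eval_process` hard-codes (a sketch; the hidden size 1024 is illustrative):

# Sketch only: (1, 32, 32, 1024) -> (1, 16, 16, 4096), so 1024 tokens become 256.
import torch
x = torch.randn(1, 32, 32, 1024)  # N, W, H, C grid of ViT tokens
y = pixel_shuffle(x, scale_factor=0.5)
print(y.shape)  # torch.Size([1, 16, 16, 4096])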