# aimv2-4b-ve/configuration_aimv2.py
from typing import Any

from transformers import Qwen2Config
from transformers.configuration_utils import PretrainedConfig

__all__ = ["AIMv2Config", "MonoConfig"]

class AIMv2Config(PretrainedConfig):
    """Configuration for the AIMv2 vision encoder.

    Describes a ViT-style image backbone: images of ``image_size`` pixels are
    split into ``patch_size`` x ``patch_size`` patches and processed by
    ``num_hidden_layers`` transformer blocks of width ``hidden_size``.
    """

    model_type: str = "aimv2"

    def __init__(
        self,
        hidden_size: int = 1024,
        intermediate_size: int = 2816,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 8,
        num_channels: int = 3,
        image_size: int = 224,
        patch_size: int = 14,
        rms_norm_eps: float = 1e-5,
        attention_dropout: float = 0.0,
        projection_dropout: float = 0.0,
        qkv_bias: bool = False,
        use_bias: bool = False,
        text_config=None,
        **kwargs: Any,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.attention_dropout = attention_dropout
        self.rms_norm_eps = rms_norm_eps
        self.projection_dropout = projection_dropout
        self.qkv_bias = qkv_bias
        self.use_bias = use_bias
        # Store the optional text sub-config rather than silently dropping
        # the accepted parameter, so it survives to_dict() round trips.
        self.text_config = text_config
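

# For reference: with the defaults above, an input image yields
# (image_size // patch_size) ** 2 = (224 // 14) ** 2 = 256 patch tokens,
# each of width hidden_size = 1024, split across num_attention_heads = 8
# attention heads of dimension 1024 // 8 = 128.
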
class MonoConfig(Qwen2Config):
    """Configuration for the Mono vision-language model: a Qwen2 language
    backbone paired with an AIMv2 vision encoder under ``vision_config``.
    """

    model_type = "mono"
    is_composition = False

    def __init__(
        self,
        vision_config=None,
        ignore_index=-100,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        # Promote a plain dict (e.g. loaded from config.json) to a typed
        # AIMv2Config before the parent constructor runs.
        if vision_config is not None:
            vision_config = AIMv2Config(**vision_config)
        self.vision_config = vision_config
        super().__init__(**kwargs)
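

if __name__ == "__main__":
    # Minimal smoke test (illustrative usage, not part of the upstream file):
    # a dict vision_config is promoted to AIMv2Config, and any field left
    # unspecified falls back to the defaults defined above.
    cfg = MonoConfig(vision_config={"image_size": 448})
    assert isinstance(cfg.vision_config, AIMv2Config)
    assert cfg.vision_config.image_size == 448
    assert cfg.vision_config.patch_size == 14  # AIMv2Config default
    assert cfg.ignore_index == -100
    print(cfg.model_type, cfg.vision_config.model_type)  # mono aimv2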