# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" MiniLM model configuration """ | |
from __future__ import absolute_import, division, print_function, unicode_literals | |
import json | |
import logging | |
import sys | |
from io import open | |
from transformers.configuration_utils import PretrainedConfig | |
logger = logging.getLogger(__name__) | |
MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP = { | |
'minilm-l12-h384-uncased': "https://conversationhub.blob.core.windows.net/beit-share-public/ckpt/minilm-l12-h384-uncased-config.json?sv=2021-10-04&st=2023-06-08T11%3A16%3A02Z&se=2033-06-09T11%3A16%3A00Z&sr=c&sp=r&sig=N4pfCVmSeq4L4tS8QbrFVsX6f6q844eft8xSuXdxU48%3D", | |
} | |
class MinilmConfig(PretrainedConfig):
    r"""
    :class:`~transformers.MinilmConfig` is the configuration class to store the configuration of a
    `MinilmModel`.

    Arguments:
        vocab_size: Vocabulary size of `input_ids` in `MinilmModel`, or the path to a
            pretrained model config JSON file.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `MinilmModel`.
        initializer_range: The standard deviation of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
    """
    pretrained_config_archive_map = MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size=28996,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=6,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 source_type_id=0,
                 target_type_id=1,
                 **kwargs):
        super(MinilmConfig, self).__init__(**kwargs)
        if isinstance(vocab_size, str) or (sys.version_info[0] == 2
                                           and isinstance(vocab_size, unicode)):
            # Treat the first argument as the path to a pretrained config JSON
            # file and copy its entries onto this instance.
            with open(vocab_size, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size, int):
            # Treat the first argument as the vocabulary size and build the
            # configuration from the keyword arguments.
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.source_type_id = source_type_id
            self.target_type_id = target_type_id
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")