from transformers import PretrainedConfig
# ----------------------------
# Define Lumenspark Configuration
# ----------------------------
class LumensparkConfig(PretrainedConfig):
"""
Configuration class for the Lumenspark model.
Stores model hyperparameters like sequence length, embedding dimension, number of layers, and others.
"""
model_type = "lumenspark"
def __init__(
self,
seq_length=768,
vocab_size=50257,
embed_dim=768,
depth=8,
heads=12,
dropout=1/17,
k=384,
rank=256,
**kwargs
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.embed_dim = embed_dim
self.depth = depth
self.heads = heads
self.seq_length = seq_length
self.dropout = dropout
self.k = k
self.rank = rank
def to_dict(self):
"""
Converts the configuration parameters to a dictionary format.
Useful for saving the configuration or inspecting model settings.
"""
output = super().to_dict()
output.update({
"vocab_size": self.vocab_size,
"embed_dim": self.embed_dim,
"depth": self.depth,
"heads": self.heads,
"seq_length": self.seq_length,
"dropout": self.dropout,
"k": self.k,
"rank": self.rank,
})
return output |
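
# ----------------------------
# Example usage (illustrative sketch; the overridden values below are arbitrary)
# ----------------------------
if __name__ == "__main__":
    # Instantiate the configuration with its default hyperparameters.
    config = LumensparkConfig()
    print(config.model_type)               # "lumenspark"
    print(config.to_dict()["embed_dim"])   # 768

    # Selected hyperparameters can be overridden via keyword arguments;
    # any additional kwargs are forwarded to PretrainedConfig.
    small_config = LumensparkConfig(depth=4, heads=8, embed_dim=512)
    assert small_config.to_dict()["depth"] == 4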