Update ultravox_model.py
ultravox_model.py (CHANGED, +2 -8)
@@ -34,14 +34,8 @@ class UltravoxModel(transformers.LlamaPreTrainedModel):
 
     config_class = UltravoxConfig
     config: UltravoxConfig  # for type hinting
-    #
-
-    # As such we have to tell is to ignore some keys that are not always in the model
-    _keys_to_ignore_on_load_unexpected = ["audio_tower.*", "language_model.*"]
-    # Usually we load encoder weights from a pretrained model, so we don't want to load the decoder weights
-    # Technically we never hit this issue because these keys are already removed from state_dict() however,
-    # but there's no harm in keeping it here for when we change that behavior.
-    _keys_to_ignore_on_load_missing = ["audio_tower.*"]
+    # Usually we load encoder and LLM weights from a pretrained model separately, so they are expected to be missing
+    _keys_to_ignore_on_load_missing = ["audio_tower.*", "language_model.*"]
 
     def __init__(self, config: UltravoxConfig):
        super().__init__(config)
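For context, _keys_to_ignore_on_load_missing is a transformers.PreTrainedModel class attribute holding regex patterns: missing checkpoint keys that match any pattern are dropped from the warning that from_pretrained() would otherwise emit. The sketch below is only an approximation of that filtering, using hypothetical key names to show the effect of the new pattern list; it is not the library's exact code.

# Illustrative sketch: roughly how the missing-key warning is filtered by
# _keys_to_ignore_on_load_missing. Key names below are hypothetical examples.
import re

ignore_on_missing = ["audio_tower.*", "language_model.*"]

missing_keys = [
    "audio_tower.layers.0.self_attn.k_proj.weight",  # loaded later from the encoder checkpoint
    "language_model.model.embed_tokens.weight",      # loaded later from the LLM checkpoint
    "multi_modal_projector.linear_1.weight",         # genuinely missing, should still be reported
]

# Keys matching any ignore pattern are removed from the reported warning.
reported = [
    k for k in missing_keys
    if not any(re.search(pat, k) for pat in ignore_on_missing)
]
print(reported)  # ['multi_modal_projector.linear_1.weight']

With the change above, both the audio encoder and the language model weights can be absent from the Ultravox checkpoint without triggering missing-key warnings, since, as the new comment notes, they are loaded separately from their own pretrained checkpoints.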