Update modeling_moe_mistral.py

modeling_moe_mistral.py  +7 -7
@@ -29,12 +29,12 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...cache_utils import Cache, DynamicCache
-from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
-from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
-from ...modeling_utils import PreTrainedModel
-from ...utils import (
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_flash_attn_2_available,
@@ -42,7 +42,7 @@ from ...utils import (
     logging,
     replace_return_docstrings,
 )
-from .
+from .configuration_moe_mistral import MixtralConfig
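All seven changed lines swap the transformers-internal relative imports (from ...utils import ...) for absolute transformers.* imports, and point the config import at the local configuration_moe_mistral module. Relative three-dot imports only resolve when the file lives inside the transformers package tree; absolute imports let the file execute as a standalone module, which is presumably the motivation: loading the checkpoint's own modeling code via trust_remote_code. A minimal loading sketch under that assumption; the repo id below is a placeholder, not a real checkpoint:

from transformers import AutoModelForCausalLM

# "org/moe-mistral-8x7b" is a placeholder repo id for illustration only.
# trust_remote_code=True makes transformers download and import the repo's own
# modeling_moe_mistral.py as a standalone module, outside the transformers
# package, which is why its imports must be absolute (transformers.utils)
# rather than relative (...utils).
model = AutoModelForCausalLM.from_pretrained(
    "org/moe-mistral-8x7b",
    trust_remote_code=True,
)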