Update modeling_quietstar.py
modeling_quietstar.py CHANGED (+7 -7)
@@ -42,12 +42,12 @@ import torch.utils.checkpoint
 from torch import nn
 from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
 
-from ...activations import ACT2FN
-from ...cache_utils import Cache, DynamicCache
-from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
-from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
-from ...modeling_utils import PreTrainedModel
-from ...utils import (
+from transformers.activations import ACT2FN
+from transformers.cache_utils import Cache, DynamicCache
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
+from transformers.modeling_utils import PreTrainedModel
+from transformers.utils import (
     add_start_docstrings,
     add_start_docstrings_to_model_forward,
     is_flash_attn_2_available,
@@ -55,7 +55,7 @@ from ...utils import (
     logging,
     replace_return_docstrings,
 )
-from .configuration_mistral import MistralConfig
+from .configuration_quietstar import MistralConfig
 
 
 if is_flash_attn_2_available():
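The change is small but load-bearing: the six transformers-internal relative imports (`from ...activations import ACT2FN`, etc.) become absolute `transformers.*` imports, and the config import is pointed at the repo-local `configuration_quietstar` module. Relative `...` imports only resolve when the file sits inside the transformers package itself; absolute imports let the file be downloaded and executed as custom modeling code from a Hub repo. A minimal usage sketch, assuming the file ships in a Hub repo loaded with `trust_remote_code=True` (the repo id below is a placeholder, not taken from this commit):

from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repo id for illustration; substitute the repo that actually
# contains modeling_quietstar.py and configuration_quietstar.py.
model_id = "your-org/quietstar"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# trust_remote_code=True fetches and imports modeling_quietstar.py; with
# absolute `transformers.*` imports, that import now succeeds outside the
# transformers source tree.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)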