# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional

import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
    FairseqIncrementalDecoder,
    FairseqLanguageModel,
    register_model,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from omegaconf import II


logger = logging.getLogger(__name__)


@dataclass
class TransformerXLConfig(FairseqDataclass):
    # defaults come from the original Transformer-XL code
    cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000])
    d_model: int = 500
    n_head: int = 10
    d_head: int = 50
    d_inner: int = 1000
    div_val: int = 1
    n_layer: int = 12
    mem_len: int = 0
    clamp_len: int = -1
    same_length: bool = False
    dropout: float = 0.0
    dropatt: float = 0.0
    checkpoint_activations: bool = False
    offload_activations: bool = False
    max_target_positions: int = II("task.max_target_positions")


@register_model("transformer_xl", dataclass=TransformerXLConfig)
class TransformerXLLanguageModel(FairseqLanguageModel):
    @classmethod
    def build_model(cls, cfg: TransformerXLConfig, task):
        return cls(TransformerXLDecoder(cfg, task))
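

# A hedged usage note (not part of the original file): registering the model
# with a config dataclass exposes each field above as a command-line flag
# (e.g. ``--mem-len 150``). An invocation along these lines is expected to
# work with the ``truncated_bptt_lm`` task from fairseq's truncated-BPTT
# example; exact flags depend on your fairseq version and data setup:
#
#   fairseq-train data-bin/wikitext-103 \
#       --user-dir examples/truncated_bptt --task truncated_bptt_lm \
#       --arch transformer_xl --mem-len 150 --tokens-per-sample 150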


class TransformerXLDecoder(FairseqIncrementalDecoder):
    def __init__(self, cfg, task):
        try:
            from transformers.models.transfo_xl import (
                TransfoXLConfig,
                TransfoXLLMHeadModel,
            )
        except ImportError:
            # fall back to the flat module layout used by transformers < 4.0
            from transformers.configuration_transfo_xl import TransfoXLConfig
            from transformers.modeling_transfo_xl import TransfoXLLMHeadModel

        super().__init__(task.target_dictionary)
        self.cfg = cfg

        # remove any cutoffs larger than the vocab size
        cutoffs = [
            cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary)
        ]

        config = TransfoXLConfig(
            vocab_size=len(task.target_dictionary),
            cutoffs=cutoffs,
            d_model=cfg.d_model,
            d_embed=cfg.d_model,
            n_head=cfg.n_head,
            d_head=cfg.d_head,
            d_inner=cfg.d_inner,
            div_val=cfg.div_val,
            n_layer=cfg.n_layer,
            mem_len=cfg.mem_len,
            clamp_len=cfg.clamp_len,
            same_length=cfg.same_length,
            dropout=cfg.dropout,
            dropatt=cfg.dropatt,
        )
        logger.info(config)
        self.model = TransfoXLLMHeadModel(config)

        # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax``
        # which adds ``None`` values to an ``nn.ParameterList``, which is not
        # supported in PyTorch. Instead we can replace this with an
        # ``nn.ModuleList``, which does support ``None`` values.
        try:
            if all(p is None for p in self.model.crit.out_projs._parameters.values()):
                self.model.crit.out_projs = torch.nn.ModuleList(
                    [None] * len(self.model.crit.out_projs._parameters)
                )
        except Exception:
            pass

        if cfg.checkpoint_activations or cfg.offload_activations:
            for i in range(len(self.model.transformer.layers)):
                self.model.transformer.layers[i] = checkpoint_wrapper(
                    self.model.transformer.layers[i],
                    offload_to_cpu=cfg.offload_activations,
                )
                # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3])

        self._mems = None

    def forward(
        self,
        src_tokens,
        src_lengths=None,  # unused
        incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
        encoder_out=None,
    ):
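        """Forward pass with Transformer-XL memories.

        During training, memories persist on ``self._mems`` so that
        consecutive chunks of the same sequences (truncated BPTT) can attend
        to earlier context. During inference, fairseq supplies
        ``incremental_state``; memories are cached there and only the most
        recent token needs to be fed through the model.
        """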
        if incremental_state is not None:  # used during inference
            mems = self.get_incremental_state(incremental_state, "mems")
            src_tokens = src_tokens[:, -1:]  # only keep the most recent token
        else:
            mems = self._mems

        output = self.model(
            input_ids=src_tokens,
            mems=mems,
            return_dict=False,
        )

        if len(output) >= 2:
            if incremental_state is not None:
                self.set_incremental_state(incremental_state, "mems", output[1])
            else:
                self._mems = output[1]
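
        # output[0] holds the per-token vocabulary scores with shape
        # (batch, seq_len, vocab); fairseq expects the first element of the
        # returned tuple to be these scores.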
        return (output[0],)

    def max_positions(self):
        return self.cfg.max_target_positions

    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
        new_order: torch.Tensor,
    ):
        """Reorder incremental state.

        This will be called when the order of the input has changed from the
        previous time step. A typical use case is beam search, where the input
        order changes between time steps based on the selection of beams.
        """
        mems = self.get_incremental_state(incremental_state, "mems")
        if mems is not None:
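            # huggingface's TransfoXL memories are shaped
            # (mem_len, batch, d_model), so the batch dimension is dim 1 here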
            new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
            self.set_incremental_state(incremental_state, "mems", new_mems)
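

# A minimal, hypothetical usage sketch (not part of the original file). It
# assumes ``task`` is a fairseq language-modeling task whose
# ``target_dictionary`` has already been built; the variable names are
# illustrative only:
#
#   cfg = TransformerXLConfig(mem_len=150)
#   model = TransformerXLLanguageModel.build_model(cfg, task)
#   scores, = model(first_chunk)  # (batch, seq_len, vocab)
#   scores, = model(next_chunk)   # attends to memories cached from first_chunk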