bird-of-paradise committed (verified)
Commit 743f7a1 · 1 Parent(s): d9aa228

Adding transpose to att_output before concatenating all attention heads

Transformer_Implementation_Tutorial.ipynb CHANGED
@@ -310,6 +310,7 @@
  " att_output = torch.matmul(att_score, V_state)\n",
  " \n",
  " # concatenate all attention heads\n",
+ " att_output = att_output.transpose(1, 2)\n",
  " att_output = att_output.contiguous().view(batch_size, seq_len, self.num_head*self.d_head) \n",
  " \n",
  " # final linear transformation to the concatenated output\n",
@@ -792,7 +793,6 @@
  " self.num_head = num_head\n",
  " self.dropout = dropout\n",
  " self.bias = bias\n",
- "\n",
  " \n",
  " # Encoder stack\n",
  " self.encoder_stack = nn.ModuleList([ TransformerEncoder(\n",
@@ -820,7 +820,6 @@
  " encoder_output = encoder(embed_input = encoder_output, padding_mask = padding_mask)\n",
  " decoder_output = decoder(embed_input = decoder_output, cross_input =encoder_output, padding_mask=padding_mask)\n",
  " \n",
- " \n",
  " return decoder_output"
  ]
 },
 
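The added `transpose(1, 2)` is what makes the subsequent `view` a true head concatenation: after `torch.matmul(att_score, V_state)` the output is laid out as (batch, num_head, seq_len, d_head), and reshaping that tensor directly would interleave tokens from different heads. A minimal standalone sketch of the fixed path (the sizes below are illustrative assumptions, not values from the notebook):

```python
import torch

# Illustrative sizes (assumptions, not from the notebook).
batch_size, num_head, seq_len, d_head = 2, 8, 10, 64

# Stand-in for torch.matmul(att_score, V_state):
# shape (batch, num_head, seq_len, d_head).
att_output = torch.randn(batch_size, num_head, seq_len, d_head)

# Swap the head and sequence axes so each token's heads are adjacent in memory:
# (batch, seq_len, num_head, d_head).
att_output = att_output.transpose(1, 2)

# Now the view concatenates the heads per token instead of mixing tokens.
att_output = att_output.contiguous().view(batch_size, seq_len, num_head * d_head)
print(att_output.shape)  # torch.Size([2, 10, 512])
```

The `.contiguous()` call is needed because `transpose` returns a non-contiguous view, and `.view()` requires a contiguous memory layout.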