|
import torch |
|
from transformers import BertModel |
|
|
|
class Ensembler(torch.nn.Module):
    """Average the scalar predictions of several specialist models.

    Each specialist is expected to be an ``nn.Module`` callable as
    ``specialist(input_ids, attention_mask)`` returning a tensor of shape
    ``(batch, 1)``; the ensemble output is the mean over specialists,
    also shaped ``(batch, 1)``.
    """

    def __init__(self, specialists):
        super().__init__()
        # BUGFIX: a plain Python list does not register submodules, so
        # .parameters(), .to(device), .state_dict(), .train()/.eval()
        # would silently skip every specialist. ModuleList registers them.
        self.specialists = torch.nn.ModuleList(specialists)

    def forward(self, input_ids, attention_mask):
        """Return the mean specialist score, shape (batch, 1)."""
        outputs = torch.cat(
            [specialist(input_ids, attention_mask) for specialist in self.specialists],
            dim=1,
        )
        # keepdim=True is equivalent to the original mean(...).unsqueeze(1).
        return torch.mean(outputs, dim=1, keepdim=True)
|
|
|
|
|
class LanguageIdentifier(torch.nn.Module):
    """Binary language-identification head on top of Portuguese BERT.

    Encodes the input with BERTimbau (large), takes the [CLS] token
    embedding from the last hidden state, and maps it through dropout
    plus a single linear layer to one unnormalised logit per example.
    """

    def __init__(self):
        super().__init__()
        # Pretrained Portuguese BERT encoder (BERTimbau large).
        self.portuguese_bert = BertModel.from_pretrained(
            "neuralmind/bert-large-portuguese-cased"
        )
        # Classification head: dropout for regularisation, then project
        # the BERT hidden size down to a single score.
        self.linear_layer = torch.nn.Sequential(
            torch.nn.Dropout(p=0.2),
            torch.nn.Linear(self.portuguese_bert.config.hidden_size, 1),
        )

    def forward(self, input_ids, attention_mask):
        """Return one unnormalised score per sequence, shape (batch, 1)."""
        encoded = self.portuguese_bert(
            input_ids=input_ids, attention_mask=attention_mask
        )
        # Position 0 of the last hidden state is the [CLS] embedding.
        cls_embedding = encoded.last_hidden_state[:, 0, :]
        return self.linear_layer(cls_embedding)