import torch
from transformers import BertModel


class LanguageIdentifier(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Pre-trained Portuguese BERT (BERTimbau large) used as the encoder
        self.portuguese_bert = BertModel.from_pretrained(
            "neuralmind/bert-large-portuguese-cased")
        # Classification head: dropout followed by a single-logit linear layer
        self.linear_layer = torch.nn.Sequential(
            torch.nn.Dropout(p=0.2),
            torch.nn.Linear(self.portuguese_bert.config.hidden_size, 1),
        )

    def forward(self, input_ids, attention_mask):
        # last_hidden_state has shape (batch_size, sequence_length, hidden_size);
        # keep only the [CLS] embedding at position 0
        outputs = self.portuguese_bert(
            input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
        outputs = self.linear_layer(outputs)
        return outputs


class Ensembler(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, input_ids, attention_mask):
        outputs = []
        # Run each domain specialist in turn and collect its logits
        for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
            specialist = LanguageIdentifier()
            specialist.load_state_dict(torch.load(f"models/{domain}.pt", map_location=self.device))
            specialist.eval()
            specialist.to(self.device)
            outputs.append(specialist(input_ids, attention_mask))
            # Remove the specialist from the GPU before loading the next one
            specialist.cpu()
            del specialist
        # Average the per-domain logits into a single ensemble prediction
        outputs = torch.cat(outputs, dim=1)
        return torch.mean(outputs, dim=1).unsqueeze(1)
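
For context, here is a minimal sketch of how the ensemble might be called at inference time. It assumes the tokenizer matching the encoder checkpoint ("neuralmind/bert-large-portuguese-cased") and that the per-domain weight files exist under models/; the example sentence and the use of a sigmoid over the single logit are illustrative assumptions, not part of the listing above.

import torch
from transformers import BertTokenizer

# Assumption: tokenizer matching the BERTimbau checkpoint used by LanguageIdentifier
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-large-portuguese-cased")

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ensembler = Ensembler().to(device)

# Illustrative input sentence (hypothetical)
batch = tokenizer(["Uma frase de exemplo para classificar."],
                  padding=True, truncation=True, return_tensors="pt").to(device)

with torch.no_grad():
    logits = ensembler(batch["input_ids"], batch["attention_mask"])  # shape (1, 1)
    probability = torch.sigmoid(logits)  # single-logit head, so sigmoid gives a score in [0, 1]

print(probability.item())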