arubenruben committed
Commit f17519e
1 Parent(s): 81c67a1

Upload model.py

Files changed (1)
  1. model.py +26 -12
model.py CHANGED
@@ -1,35 +1,49 @@
 import torch
 from transformers import BertModel
 
+
 class Ensembler(torch.nn.Module):
     def __init__(self, specialists):
         super().__init__()
-
         self.specialists = specialists
 
     def forward(self, input_ids, attention_mask):
-        outputs = torch.cat([specialist(input_ids, attention_mask)
-                             for specialist in self.specialists], dim=1)
+        outputs = []
+
+        for specialist in self.specialists:
+            specialist.eval()
+
+            specialist.to(torch.device(
+                "cuda" if torch.cuda.is_available() else "cpu"))
+
+            outputs.append(specialist(input_ids, attention_mask))
+
+            # Remove the specialist from the GPU
+            specialist.cpu()
+
+        outputs = torch.cat(outputs, dim=1)
 
         return torch.mean(outputs, dim=1).unsqueeze(1)
-
+
 
 class LanguageIdentifier(torch.nn.Module):
     def __init__(self):
         super().__init__()
-
-        self.portuguese_bert = BertModel.from_pretrained("neuralmind/bert-large-portuguese-cased")
-
+
+        self.portuguese_bert = BertModel.from_pretrained(
+            "neuralmind/bert-large-portuguese-cased")
+
         self.linear_layer = torch.nn.Sequential(
             torch.nn.Dropout(p=0.2),
             torch.nn.Linear(self.portuguese_bert.config.hidden_size, 1),
         )
 
     def forward(self, input_ids, attention_mask):
-
-        #(Batch_Size,Sequence Length, Hidden_Size)
-        outputs = self.portuguese_bert(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
-
+
+        # (Batch_Size,Sequence Length, Hidden_Size)
+        outputs = self.portuguese_bert(
+            input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
+
         outputs = self.linear_layer(outputs)
 
-        return outputs
+        return outputs
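For context, a minimal usage sketch of the updated Ensembler (not part of the commit). It assumes the specialists are LanguageIdentifier instances; here they are freshly constructed, whereas in practice the fine-tuned specialist checkpoints would be loaded first. The tokenizer choice, ensemble size, and example sentence are illustrative assumptions, not taken from the repository.

import torch
from transformers import BertTokenizer

from model import Ensembler, LanguageIdentifier

tokenizer = BertTokenizer.from_pretrained(
    "neuralmind/bert-large-portuguese-cased")

# Hypothetical ensemble of two specialists; the real checkpoints and
# ensemble size are defined outside model.py.
specialists = [LanguageIdentifier(), LanguageIdentifier()]
ensembler = Ensembler(specialists)

# Ensembler.forward() moves each specialist to this same device, so
# the input tensors must live there as well.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

batch = tokenizer(["O meu nome é José."], return_tensors="pt",
                  padding=True, truncation=True)
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)

with torch.no_grad():
    # Each specialist runs its forward pass on the GPU (if available)
    # and is moved back to the CPU afterwards, so only one large BERT
    # occupies GPU memory at a time.
    scores = ensembler(input_ids, attention_mask)

print(scores.shape)  # (batch_size, 1): mean of the specialists' logits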