menikev committed
Commit aef147c
1 Parent(s): 4300df6

Upload prediction_sinhala.py

Files changed (1)
  1. models/prediction_sinhala.py +235 -0
models/prediction_sinhala.py ADDED
@@ -0,0 +1,235 @@
# Runs in Google Colab: mount Google Drive and install the faknow library.
from google.colab import drive
drive.mount('/content/drive')
!pip install faknow

from typing import List, Optional, Tuple

import torch
from torch import Tensor
from torch import nn
from transformers import RobertaModel

from faknow.model.layers.layer import TextCNNLayer
from faknow.model.model import AbstractModel
import pandas as pd


class _MLP(nn.Module):
    def __init__(self,
                 input_dim: int,
                 embed_dims: List[int],
                 dropout_rate: float,
                 output_layer=True):
        super().__init__()
        layers = list()
        for embed_dim in embed_dims:
            layers.append(nn.Linear(input_dim, embed_dim))
            layers.append(nn.BatchNorm1d(embed_dim))
            layers.append(nn.ReLU())
            layers.append(nn.Dropout(p=dropout_rate))
            input_dim = embed_dim
        if output_layer:
            layers.append(torch.nn.Linear(input_dim, 1))
        self.mlp = torch.nn.Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): shared feature from domain and text, shape=(batch_size, embed_dim)
        """
        return self.mlp(x)


class _MaskAttentionLayer(torch.nn.Module):
    """
    Compute attention layer
    """
    def __init__(self, input_size: int):
        super(_MaskAttentionLayer, self).__init__()
        self.attention_layer = torch.nn.Linear(input_size, 1)

    def forward(self,
                inputs: Tensor,
                mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        weights = self.attention_layer(inputs).view(-1, inputs.size(1))
        if mask is not None:
            weights = weights.masked_fill(mask == 0, float("-inf"))
        weights = torch.softmax(weights, dim=-1).unsqueeze(1)
        outputs = torch.matmul(weights, inputs).squeeze(1)
        return outputs, weights


class MDFEND(AbstractModel):
    r"""
    MDFEND: Multi-domain Fake News Detection, CIKM 2021
    paper: https://dl.acm.org/doi/10.1145/3459637.3482139
    code: https://github.com/kennqiang/MDFEND-Weibo21
    """
    def __init__(self,
                 pre_trained_bert_name: str,
                 domain_num: int,
                 mlp_dims: Optional[List[int]] = None,
                 dropout_rate=0.2,
                 expert_num=5):
        """
        Args:
            pre_trained_bert_name (str): name or local path of the pre-trained bert model
            domain_num (int): total number of domains
            mlp_dims (List[int]): dimensions of the MLP layers; if None, defaults to [384]
            dropout_rate (float): rate of the Dropout layer, default=0.2
            expert_num (int): number of experts (TextCNNLayer), default=5
        """
        super(MDFEND, self).__init__()
        self.domain_num = domain_num
        self.expert_num = expert_num
        # freeze the pre-trained text encoder; only the downstream modules keep gradients
        self.bert = RobertaModel.from_pretrained(
            pre_trained_bert_name).requires_grad_(False)
        self.embedding_size = self.bert.config.hidden_size
        self.loss_func = nn.BCELoss()
        if mlp_dims is None:
            mlp_dims = [384]

        filter_num = 64
        filter_sizes = [1, 2, 3, 5, 10]
        experts = [
            TextCNNLayer(self.embedding_size, filter_num, filter_sizes)
            for _ in range(self.expert_num)
        ]
        self.experts = nn.ModuleList(experts)

        self.gate = nn.Sequential(
            nn.Linear(self.embedding_size * 2, mlp_dims[-1]), nn.ReLU(),
            nn.Linear(mlp_dims[-1], self.expert_num), nn.Softmax(dim=1))

        self.attention = _MaskAttentionLayer(self.embedding_size)

        self.domain_embedder = nn.Embedding(num_embeddings=self.domain_num,
                                            embedding_dim=self.embedding_size)
        # each expert outputs filter_num * len(filter_sizes) = 320 features
        self.classifier = _MLP(320, mlp_dims, dropout_rate)

    def forward(self, token_id: Tensor, mask: Tensor,
                domain: Tensor) -> Tensor:
        """
        Args:
            token_id (Tensor): token ids from bert tokenizer, shape=(batch_size, max_len)
            mask (Tensor): mask from bert tokenizer, shape=(batch_size, max_len)
            domain (Tensor): domain id, shape=(batch_size,)

        Returns:
            FloatTensor: the prediction of being fake, shape=(batch_size,)
        """
        text_embedding = self.bert(token_id,
                                   attention_mask=mask).last_hidden_state
        attention_feature, _ = self.attention(text_embedding, mask)

        domain_embedding = self.domain_embedder(domain.view(-1, 1)).squeeze(1)

        # gate over the experts, conditioned on the domain embedding and the pooled text feature
        gate_input = torch.cat([domain_embedding, attention_feature], dim=-1)
        gate_output = self.gate(gate_input)

        # mixture of experts: weighted sum of the TextCNN expert outputs
        shared_feature = 0
        for i in range(self.expert_num):
            expert_feature = self.experts[i](text_embedding)
            shared_feature += (expert_feature * gate_output[:, i].unsqueeze(1))

        label_pred = self.classifier(shared_feature)

        return torch.sigmoid(label_pred.squeeze(1))

    def calculate_loss(self, data) -> Tensor:
        """
        calculate loss via BCELoss

        Args:
            data (dict): batch data dict

        Returns:
            loss (Tensor): loss value
        """
        token_ids = data['text']['token_id']
        masks = data['text']['mask']
        domains = data['domain']
        labels = data['label']
        output = self.forward(token_ids, masks, domains)
        return self.loss_func(output, labels.float())

    def predict(self, data_without_label) -> Tensor:
        """
        predict the probability of being fake news

        Args:
            data_without_label (Dict[str, Any]): batch data dict

        Returns:
            Tensor: probability of being fake, shape=(batch_size,)
        """
        token_ids = data_without_label['text']['token_id']
        masks = data_without_label['text']['mask']
        domains = data_without_label['domain']

        output_prob = self.forward(token_ids, masks, domains)

        return output_prob


from faknow.data.dataset.text import TextDataset
from faknow.data.process.text_process import TokenizerFromPreTrained
from faknow.evaluate.evaluator import Evaluator

import torch
from torch.utils.data import DataLoader

testing_path = "/content/drive/MyDrive/sinhala-dataset/test_data.json"

df = pd.read_json(testing_path)
df.head()
df = df[:100]
df["label"] = 0  # dummy label column (the test data has no ground-truth labels)
df.head()
print(len(df))
path = '/content/drive/MyDrive/sinhala-dataset'
testing_json = "/testing.json"
df.to_json(path + testing_json, orient='records')

MODEL_SAVE_PATH = "/content/drive/MyDrive/models-path-improvement/last-epoch-model-2024-03-08-15_34_03_6.pth"

max_len, bert = 160, 'sinhala-nlp/sinbert-sold-si'
tokenizer = TokenizerFromPreTrained(max_len, bert)

# dataset
batch_size = 100

testing_path = path + testing_json

testing_set = TextDataset(testing_path, ['text'], tokenizer)
testing_loader = DataLoader(testing_set, batch_size, shuffle=False)

# prepare model
domain_num = 3

model = MDFEND(bert, domain_num, expert_num=18,
               mlp_dims=[5080, 4020, 3010, 2024, 1012, 606, 400])
model.load_state_dict(torch.load(f=MODEL_SAVE_PATH, map_location=torch.device('cpu')))
model.eval()  # switch Dropout/BatchNorm layers to inference mode

outputs = []
with torch.no_grad():  # no gradients needed during prediction
    for batch_data in testing_loader:
        outputs.append(model.predict(batch_data))
print(outputs)

# 1 ====> offensive
# 0 ====> not offensive
label = []
for output in outputs:
    for out in output:
        output_prob = out.item()
        if output_prob >= 0.5:
            label.append(1)
        else:
            label.append(0)

print(label)
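
Because the DataLoader is created with shuffle=False, the thresholded labels line up row for row with the 100 rows kept in df. A minimal sketch of how the predictions could be written back to the dataframe and saved, assuming a hypothetical output file predictions.csv in the same Drive folder (not part of the uploaded script):

# Sketch only: reuses `df`, `outputs` and `label` from the script above;
# the output path is an illustrative assumption.
df["predicted_prob"] = [out.item() for output in outputs for out in output]
df["predicted_label"] = label  # 1 = offensive, 0 = not offensive
df.to_csv(path + "/predictions.csv", index=False)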