# NOTE: lines of web-UI blame/size/line-number gutter residue were stripped here;
# they were not part of the original source file.
import torch # Import PyTorch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import DataLoader
from torch import nn
from transformers import AutoModel, AutoTokenizer
class DebertaEvaluator(nn.Module):
    """DeBERTa-v3-base encoder topped with dropout and a 6-way linear head."""

    def __init__(self):
        super().__init__()
        # Pretrained backbone; deberta-v3-base has hidden size 768.
        self.deberta = AutoModel.from_pretrained('microsoft/deberta-v3-base')
        self.dropout = nn.Dropout(0.5)
        self.linear = nn.Linear(768, 6)

    def forward(self, input_id, mask):
        """Encode the batch, mean-pool over the sequence axis, project to 6 scores.

        NOTE(review): the pooling averages over ALL positions, padding included —
        the attention mask is passed to the encoder but not applied to the mean.
        Presumably this matches how the checkpoint was fine-tuned; confirm before
        changing.
        """
        encoded = self.deberta(input_ids=input_id, attention_mask=mask)
        pooled = encoded.last_hidden_state.mean(dim=1)
        return self.linear(self.dropout(pooled))
def inference(input_text):
    """Score *input_text* with the fine-tuned DeBERTa evaluator.

    Loads the pickled model and tokenizer from the current directory,
    tokenizes the text as a batch of one, and runs a forward pass on CPU.

    Args:
        input_text: Raw text string to evaluate.

    Returns:
        list: The model's output logits (shape [1, 6]) as nested Python lists.
    """
    saved_model_path = './'
    device = torch.device('cpu')
    # SECURITY: torch.load unpickles arbitrary objects and can execute code —
    # only load these files from a trusted source.
    model = torch.load(saved_model_path + 'fine-tuned-model.pt', map_location=device)
    tokenizer = torch.load(saved_model_path + 'fine-tuned-tokenizer.pt', map_location=device)
    model.eval()

    # return_tensors='pt' yields batched [1, seq_len] long tensors directly,
    # replacing the fragile torch.Tensor(...).resize_() round-trip (resize_ is
    # deprecated for this use and the float->long cast was easy to get wrong).
    # Also avoids shadowing the builtin `input`.
    encoded = tokenizer(input_text, return_tensors='pt')
    input_ids = encoded['input_ids'].to(device)
    mask = encoded['attention_mask'].to(device)

    # Inference only: skip autograd graph construction.
    with torch.no_grad():
        output = model(input_ids, mask)
    return output.tolist()
if __name__ == "__main__":
inference()