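"""Benchmark embedding-based Portuguese variety identifiers (pt-PT vs. pt-BR).

Loads one fine-tuned model per training domain and evaluates each of them on
the test split of every domain of arubenruben/Portuguese_Language_Identification,
writing the cross-domain metrics to out/embeddings.json.
"""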
import logging
import os
from pathlib import Path

import evaluate
import pandas as pd
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import (BertForTokenClassification, BertModel,
                          BertTokenizerFast)
def tokenize(dataset):
    BERT_MAX_LEN = 512
    # model_max_length is the tokenizer init argument; the per-call max_length
    # below controls the actual truncation/padding length.
    tokenizer = BertTokenizerFast.from_pretrained(
        "neuralmind/bert-base-portuguese-cased", model_max_length=BERT_MAX_LEN)
    return dataset.map(lambda example: tokenizer(
        example["text"], truncation=True, padding="max_length", max_length=BERT_MAX_LEN))
def create_dataloader(dataset, shuffle=True, drop_last=True):
    # drop_last is parameterized so that evaluation can keep every sample.
    return DataLoader(dataset, batch_size=8, shuffle=shuffle, num_workers=8, drop_last=drop_last)
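# Usage sketch (assumes a datasets.Dataset with a "text" column, as produced in
# benchmark() below):
#   ds = tokenize(dataset)
#   ds.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
#   loader = create_dataloader(ds, shuffle=False, drop_last=False)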
CURRENT_PATH = Path(__file__).parent
# Ensure the output directory exists before logging opens its file there.
os.makedirs(os.path.join(CURRENT_PATH, 'out'), exist_ok=True)
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s', datefmt='%H:%M:%S',
                    filename=os.path.join(CURRENT_PATH, 'out', 'debug_embeddings.txt'), filemode='w')
class LanguageIdentifier(torch.nn.Module):
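    """Binary classifier distinguishing European (pt-PT) from Brazilian (pt-BR) Portuguese.

    Combines the [CLS] representation of Bertimbau with that of a Portuguese
    POS-tagging BERT, according to `mode`: 'horizontal_stacking',
    'bertimbau_only', 'pos_only', or 'vertical_sum'.
    """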
def __init__(self, mode='horizontal_stacking', pos_layers_to_freeze=0, bertimbau_layers_to_freeze=0):
super().__init__()
self.labels = ['pt-PT', 'pt-BR']
self.portuguese_model = BertModel.from_pretrained(
"neuralmind/bert-base-portuguese-cased")
self.portuguese_pos_tagging_model = BertForTokenClassification.from_pretrained(
"lisaterumi/postagger-portuguese")
        # Match "encoder.layer.<n>." exactly: a bare ".{layer}" substring test
        # would also freeze layers 10-12 when asked to freeze layer 1.
        for layer in range(bertimbau_layers_to_freeze):
            for name, param in self.portuguese_model.named_parameters():
                if f"encoder.layer.{layer}." in name:
                    logging.info(f"Freezing layer {name} of Bertimbau")
                    param.requires_grad = False
        for layer in range(pos_layers_to_freeze):
            for name, param in self.portuguese_pos_tagging_model.named_parameters():
                if f"encoder.layer.{layer}." in name:
                    logging.info(f"Freezing layer {name} of the POS tagger")
                    param.requires_grad = False
        # Drop the POS tagger's classification head so that .logits exposes the
        # encoder hidden states directly.
        self.portuguese_pos_tagging_model.classifier = torch.nn.Identity()
        self.mode = mode
        if self.mode == 'horizontal_stacking':
            self.linear = self.common_network(torch.nn.Linear(
                self.portuguese_pos_tagging_model.config.hidden_size + self.portuguese_model.config.hidden_size, 512))
        elif self.mode in ('bertimbau_only', 'pos_only', 'vertical_sum'):
            self.linear = self.common_network(torch.nn.Linear(
                self.portuguese_model.config.hidden_size, 512))
        else:
            raise NotImplementedError(f"Unknown mode: {self.mode}")
    def common_network(self, custom_linear):
        # Shared classification head: project to 512 dimensions, apply ReLU and
        # dropout, then emit a single raw logit (no sigmoid is applied here).
        return torch.nn.Sequential(
            custom_linear,
            torch.nn.ReLU(),
            torch.nn.Dropout(0.2),
            torch.nn.Linear(512, 1),
        )
    def forward(self, input_ids, attention_mask):
        # [CLS] embedding from Bertimbau: (batch_size, hidden_size)
        outputs_bert = self.portuguese_model(
            input_ids=input_ids, attention_mask=attention_mask).last_hidden_state[:, 0, :]
        # [CLS] output of the POS model; with the classifier replaced by
        # Identity, .logits holds the hidden states: (batch_size, hidden_size)
        outputs_pos = self.portuguese_pos_tagging_model(
            input_ids=input_ids, attention_mask=attention_mask).logits[:, 0, :]
if self.mode == 'horizontal_stacking':
outputs = torch.cat((outputs_bert, outputs_pos), dim=1)
elif self.mode == 'bertimbau_only':
outputs = outputs_bert
elif self.mode == 'pos_only':
outputs = outputs_pos
elif self.mode == 'vertical_sum':
outputs = outputs_bert + outputs_pos
outputs = torch.nn.functional.normalize(outputs, p=2, dim=1)
return self.linear(outputs)
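# Example (sketch, not part of the pipeline): running a dummy batch through the
# model to check shapes. LanguageIdentifier and the 512-token length come from
# this file; the all-padding batch is purely illustrative.
#   model = LanguageIdentifier(mode='bertimbau_only')
#   ids = torch.zeros((2, 512), dtype=torch.long)
#   mask = torch.ones((2, 512), dtype=torch.long)
#   logits = model(ids, mask)  # shape (2, 1), raw logits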
def load_models():
    models = []
    for domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        logging.info(f"Loading {domain} model...")
        model = LanguageIdentifier(mode='pos_only')
        # map_location='cpu' keeps loading safe on CUDA-less machines; the
        # model is moved to the right device in benchmark().
        model.load_state_dict(torch.load(os.path.join(
            CURRENT_PATH, 'models', 'embeddings', f'{domain}.pt'), map_location='cpu'))
        models.append({
            'model': model,
            'train_domain': domain,
        })
    return models
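# One checkpoint per training domain is expected under models/embeddings/<domain>.pt,
# each holding a state_dict for LanguageIdentifier(mode='pos_only').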
def benchmark(model, debug=False):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    df_result = pd.DataFrame(
        columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
    train_domain = model['train_domain']
    model = model['model']
    model.to(device)
    model.eval()
    # Load each metric once; they are reused for every test domain.
    metrics = {name: evaluate.load(name)
               for name in ['accuracy', 'f1', 'precision', 'recall']}
    for test_domain in ['politics', 'news', 'law', 'social_media', 'literature', 'web']:
        dataset = load_dataset(
            'arubenruben/Portuguese_Language_Identification', test_domain, split='test')
        if debug:
            logging.info("Debug mode: using only 100 samples")
            dataset = dataset.shuffle().select(range(100))
        else:
            dataset = dataset.shuffle().select(range(min(50_000, len(dataset))))
        dataset = tokenize(dataset)
        dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
        # Evaluate in order and keep every sample, collecting references from
        # the same batches as the predictions so the two stay aligned.
        dataloader = create_dataloader(dataset, shuffle=False, drop_last=False)
        y_pred, y_true = [], []
        with torch.no_grad():
            for batch in tqdm(dataloader):
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                # The head emits a raw logit; assuming a BCE-with-logits
                # training objective, sigmoid + 0.5 is the matching threshold.
                probs = torch.sigmoid(model(input_ids, attention_mask)).squeeze(-1)
                y_pred.extend((probs > 0.5).long().cpu().tolist())
                y_true.extend(batch['label'].tolist())
        results = {name: metric.compute(predictions=y_pred, references=y_true)[name]
                   for name, metric in metrics.items()}
        df_result = pd.concat([df_result, pd.DataFrame({
            'train_domain': [train_domain],
            'test_domain': [test_domain],
            **{name: [value] for name, value in results.items()},
        })], ignore_index=True)
    return df_result
def test():
DEBUG = False
models = load_models()
df_results = pd.DataFrame(
columns=['train_domain', 'test_domain', 'accuracy', 'f1', 'precision', 'recall'])
for model in models:
logging.info(f"Train Domain {model['train_domain']}...")
df_results = pd.concat(
[df_results, benchmark(model, debug=DEBUG)], ignore_index=True)
logging.info("Saving Results...")
df_results.to_json(os.path.join(CURRENT_PATH, 'out', 'embeddings.json'),
orient='records', indent=4, force_ascii=False)
if __name__ == '__main__':
test()
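# Example (sketch): once test() has run, out/embeddings.json holds one record per
# (train_domain, test_domain) pair; a cross-domain F1 matrix is then, e.g.:
#   df = pd.read_json(os.path.join(CURRENT_PATH, 'out', 'embeddings.json'))
#   print(df.pivot(index='train_domain', columns='test_domain', values='f1'))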