import logging
import os

import pandas as pd
import torch
from datasets import Dataset, load_dataset
from imblearn.under_sampling import RandomUnderSampler
from transformers import BertTokenizerFast


def balance_data(dataset):
    """Under-sample the training split so every label is equally represented."""
    df = dataset.to_pandas()
    logging.info(f"Before balancing: {df['label'].value_counts()}")
    # replacement=True lets the sampler draw the same row more than once.
    rus = RandomUnderSampler(random_state=42, replacement=True)
    # fit_resample expects a 2-D feature matrix, hence the reshape.
    X_resampled, y_resampled = rus.fit_resample(
        df['text'].to_numpy().reshape(-1, 1), df['label'].to_numpy())
    df = pd.DataFrame({'text': X_resampled.flatten(), 'label': y_resampled})
    logging.info(f"After balancing: {df['label'].value_counts()}")
    return Dataset.from_pandas(df)


def tokenize(dataset):
    """Tokenize every split with BERTimbau, padding/truncating to 512 tokens."""
    tokenizer = BertTokenizerFast.from_pretrained(
        "neuralmind/bert-large-portuguese-cased")
    return dataset.map(lambda example: tokenizer(
        example["text"], truncation=True, padding="max_length", max_length=512))


def tokenize_single_document(text):
    """Tokenize one document. Supports the notebook version of LID; unused elsewhere."""
    tokenizer = BertTokenizerFast.from_pretrained(
        "neuralmind/bert-large-portuguese-cased")
    return tokenizer(text, truncation=True, padding="max_length", max_length=512)


def load_dataloader(domain):
    logging.info(f"Loading {domain} dataset...")

    if domain == 'dslcc':
        dataset = load_dataset("arubenruben/portuguese_dslcc")
    else:
        dataset = load_dataset("Random-Mary-Smith/port_data_random", domain)

    DEBUG = os.getenv('DEBUG', 'False') == 'True'

    # Balance the training split and cap the test split at 10k documents.
    dataset['train'] = balance_data(dataset['train'])
    dataset['test'] = dataset['test'].select(
        range(min(len(dataset['test']), 10_000)))

    if DEBUG:
        logging.info("DEBUG MODE: Loading only 50 samples per split")
        for split in ['train', 'test']:
            dataset[split] = dataset[split].select(
                range(min(len(dataset[split]), 50)))

    dataset = tokenize(dataset)
    dataset.set_format(type='torch',
                       columns=['input_ids', 'attention_mask', 'label'])

    # Create DataLoaders. BATCH_SIZE must be set in the environment.
    batch_size = int(os.getenv('BATCH_SIZE'))
    train_dataloader = torch.utils.data.DataLoader(
        dataset['train'], batch_size=batch_size, shuffle=True)
    test_dataloader = torch.utils.data.DataLoader(
        dataset['test'], batch_size=batch_size, shuffle=False)

    return train_dataloader, test_dataloader
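

# Minimal usage sketch, not part of the original module: picking 'dslcc' as
# the domain and defaulting BATCH_SIZE to 8 are illustrative assumptions.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    os.environ.setdefault('BATCH_SIZE', '8')  # hypothetical default for a local run
    train_dataloader, test_dataloader = load_dataloader('dslcc')
    batch = next(iter(train_dataloader))
    # Every sequence is padded/truncated to 512 tokens, so input_ids is
    # shaped (batch_size, 512) and label is shaped (batch_size,).
    print(batch['input_ids'].shape, batch['label'].shape)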