arubenruben committed
Commit
24f90ea
1 Parent(s): 7f341be

Upload data.py

Files changed (1)
  1. data.py +69 -0
data.py ADDED
@@ -0,0 +1,69 @@
+import torch
+from datasets import load_dataset, Dataset
+from transformers import BertTokenizerFast
+import pandas as pd
+from imblearn.under_sampling import RandomUnderSampler
+import logging
+import os
+
+
+def balance_data(dataset):
+    df = dataset.to_pandas()
+
+    logging.info(f"Balancing {df['label'].value_counts()}")
+
+    rus = RandomUnderSampler(random_state=42, replacement=True)
+
+    X_resampled, y_resampled = rus.fit_resample(
+        df['text'].to_numpy().reshape(-1, 1), df['label'].to_numpy())
+
+    df = pd.DataFrame(
+        {'text': X_resampled.flatten(), 'label': y_resampled})
+
+    logging.info(f"After balancing: {df['label'].value_counts()}")
+
+    return Dataset.from_pandas(df)
+
+
+def tokenize(dataset):
+    tokenizer = BertTokenizerFast.from_pretrained("neuralmind/bert-large-portuguese-cased")
+
+    dataset = dataset.map(lambda example: tokenizer(
+        example["text"], truncation=True, padding="max_length", max_length=512))
+
+    return dataset
+
+# This function supports the Notebook version of LID. No usage elsewhere.
+def tokenize_single_document(text):
+    tokenizer = BertTokenizerFast.from_pretrained("neuralmind/bert-large-portuguese-cased")
+
+    return tokenizer(text, truncation=True, padding="max_length", max_length=512)
+
+def load_dataloader(domain):
+
+    logging.info(f"Loading {domain} dataset...")
+
+    if domain == 'dslcc':
+        dataset = load_dataset("arubenruben/portuguese_dslcc")
+    else:
+        dataset = load_dataset("Random-Mary-Smith/port_data_random", domain)
+
+    DEBUG = (os.getenv('DEBUG', 'False') == 'True')
+
+    dataset['train'] = balance_data(dataset['train'])
+
+    dataset['test'] = dataset['test'].select(range(min(len(dataset['test']), 10_000)))
+
+    for split in ['train', 'test']:
+        if DEBUG:
+            logging.info("DEBUG MODE: Loading only 50 samples")
+            dataset[split] = dataset[split].select(range(min(len(dataset[split]), 50)))
+
+    dataset = tokenize(dataset)
+    dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'label'])
+
+    # Create Dataloaders
+    train_dataloader = torch.utils.data.DataLoader(dataset['train'], batch_size=int(os.getenv('BATCH_SIZE')), shuffle=True)
+    test_dataloader = torch.utils.data.DataLoader(dataset['test'], batch_size=int(os.getenv('BATCH_SIZE')), shuffle=False)
+
+    return train_dataloader, test_dataloader
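
For context, a minimal sketch of how these helpers might be driven from a training script. The environment values, the logging setup, and the 'dslcc' domain choice below are illustrative assumptions; only load_dataloader and tokenize_single_document come from the commit. Note also that balance_data builds RandomUnderSampler with replacement=True, so the retained majority-class rows can contain duplicates.

    import os
    import logging

    # Assumption: BATCH_SIZE must be set before calling load_dataloader,
    # since data.py reads it with int(os.getenv('BATCH_SIZE')).
    os.environ['BATCH_SIZE'] = '8'
    # Optional: DEBUG='True' truncates each split to 50 examples for smoke tests.
    os.environ['DEBUG'] = 'True'

    logging.basicConfig(level=logging.INFO)

    from data import load_dataloader, tokenize_single_document

    # 'dslcc' selects arubenruben/portuguese_dslcc; any other value is passed
    # as a configuration name to Random-Mary-Smith/port_data_random.
    train_dataloader, test_dataloader = load_dataloader('dslcc')

    batch = next(iter(train_dataloader))
    print(batch['input_ids'].shape)  # torch.Size([8, 512]) after max_length padding

    # Single-document helper used by the notebook version of LID.
    encoding = tokenize_single_document("Este é um exemplo de texto em português.")
    print(len(encoding['input_ids']))  # 512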