import torch
from torch.utils.data import Dataset
from transformers import AutoTokenizer


class TransNARTextDataset(Dataset):
    """Synthetic text dataset for TransNAR experiments.

    Pre-generates `num_samples` examples of either 'math' or 'literature'
    text, tokenizes them to `max_length`, and moves the tensors to `device`.
    """

    def __init__(self, data_type, num_samples, max_length, vocab_size, device):
        self.data_type = data_type
        self.num_samples = num_samples
        self.max_length = max_length
        self.vocab_size = vocab_size
        self.device = device

        # Choose a tokenizer per domain; both are standard Hugging Face checkpoints.
        if data_type == 'math':
            self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        elif data_type == 'literature':
            self.tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
        else:
            raise ValueError("data_type must be 'math' or 'literature'")

        # Generate everything eagerly so __getitem__ is a cheap tensor index.
        self.input_ids, self.attention_masks, self.labels = self.generate_data()

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        return self.input_ids[idx], self.attention_masks[idx], self.labels[idx]

    def generate_data(self):
        input_ids = []
        attention_masks = []
        labels = []

        for _ in range(self.num_samples):
            if self.data_type == 'math':
                text = self.generate_math_text()
            else:
                text = self.generate_literature_text()

            # `pad_to_max_length=True` is deprecated in recent transformers
            # releases; `padding='max_length'` plus `truncation=True` is the
            # current equivalent.
            encoded = self.tokenizer.encode_plus(
                text,
                max_length=self.max_length,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                return_tensors='pt',
            )

            # encode_plus returns tensors of shape (1, max_length); drop the
            # leading batch dimension so torch.stack produces
            # (num_samples, max_length) rather than (num_samples, 1, max_length).
            input_ids.append(encoded['input_ids'].squeeze(0))
            attention_masks.append(encoded['attention_mask'].squeeze(0))
            labels.append(self.generate_label(text))

        return torch.stack(input_ids).to(self.device), \
            torch.stack(attention_masks).to(self.device), \
            torch.stack(labels).to(self.device)

    def generate_math_text(self):
        """Produce one synthetic math string; must be implemented or overridden."""
        raise NotImplementedError

    def generate_literature_text(self):
        """Produce one synthetic literature string; must be implemented or overridden."""
        raise NotImplementedError

    def generate_label(self, text):
        """Return the label for `text` as a tensor (torch.stack in generate_data requires it)."""
        raise NotImplementedError
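

# Usage sketch (illustrative addition, not part of the original class): the
# generate_* methods above are stubs, so this demo fills them in via a
# hypothetical ToyMathDataset subclass with trivial content, just to show the
# end-to-end tensor shapes. Real experiments would supply their own
# implementations and device/vocab settings.
if __name__ == '__main__':
    import random
    from torch.utils.data import DataLoader

    class ToyMathDataset(TransNARTextDataset):
        def generate_math_text(self):
            # Tiny addition prompts, e.g. "3 + 5 ="
            a, b = random.randint(0, 9), random.randint(0, 9)
            return f"{a} + {b} ="

        def generate_literature_text(self):
            return "It was a dark and stormy night."

        def generate_label(self, text):
            # Return a tensor so torch.stack in generate_data succeeds.
            a, b = text.rstrip(' =').split(' + ')
            return torch.tensor(int(a) + int(b))

    ds = ToyMathDataset('math', num_samples=8, max_length=16,
                        vocab_size=30522, device='cpu')
    loader = DataLoader(ds, batch_size=4)
    ids, masks, ys = next(iter(loader))
    print(ids.shape, masks.shape, ys.shape)  # (4, 16), (4, 16), (4,)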