import csv
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import math
import progressbar
device="cpu"
def CreateBar():
    global bar
    bar = progressbar.ProgressBar(
        maxval=100,
        widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
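# Character-level vocabulary: Latin letters (AZERTY order), German umlauts, ß,
# dashes, and space. Index len(tokens) is reserved later as the padding id.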
tokens = list("azertyuiopqsdfghjklmwxcvbnäüöß—– ")
tokensdict = {}
for i in range(len(tokens)):
    # One-hot vector for each token (currently unused below)
    tokensdict[tokens[i]] = [0] * i + [1] + [0] * (len(tokens) - (i + 1))
# Open the CSV file of top German verbs
with open("C:\\Users\\marc2\\Downloads\\7eaaf0e22461b505c749e268c0b72bc4-12ebe211a929f039791dfeaa1a019b64cadddaf1\\7eaaf0e22461b505c749e268c0b72bc4-12ebe211a929f039791dfeaa1a019b64cadddaf1\\top-german-verbs.csv", 'r', encoding="utf-8") as file:
    # Read all rows, skipping the header row
    reader = [row for row in csv.reader(file)][1:]
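# Thin Dataset wrapper so the (features, labels) lists work with a DataLoader.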
class CSVDataset(Dataset):
    def __init__(self, features, labels):
        self.features = features
        self.labels = labels

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        return self.features[idx], self.labels[idx]
# Build the training pairs as lists of character-index tensors.
features = []
labels = []
padding = len(tokens)  # index reserved for the padding / end-of-sequence token
for row in reader:
    # row[2] is the source string; row[8] is the target (presumably the Präteritum).
    features += [torch.Tensor([tokens.index(ch) for ch in row[2]])]
    labels += [torch.Tensor([tokens.index(ch) for ch in row[8]])]
MyDataset = CSVDataset(features=features, labels=labels)
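# Encoder-decoder Transformer: the encoder reads the source verb, the decoder
# generates the target form one character at a time.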
class TransformerModel(nn.Module):
    def __init__(self, vocab_size, emb_dim, nhead, num_encoder_layers,
                 num_decoder_layers, dim_feedforward, dropout=0.1):
        super().__init__()
        self.custom_embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=padding).to(device)
        self.pos_encoder = PositionalEncoding(emb_dim, dropout).to(device)
        encoder_layer = nn.TransformerEncoderLayer(emb_dim, nhead, dim_feedforward,
                                                   dropout, batch_first=True).to(device)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers)
        decoder_layer = nn.TransformerDecoderLayer(emb_dim, nhead, dim_feedforward,
                                                   dropout, batch_first=True).to(device)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self.output_layer = nn.Linear(emb_dim, vocab_size).to(device)
    def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None,
                src_key_padding_mask=None, tgt_key_padding_mask=None,
                memory_key_padding_mask=None):
        src_emb = self.pos_encoder(self.custom_embedding(src.long()))
        tgt_emb = self.pos_encoder(self.custom_embedding(tgt.long()))
        encoder_output = self.transformer_encoder(src_emb, mask=src_mask,
                                                  src_key_padding_mask=src_key_padding_mask)
        decoder_output = self.transformer_decoder(tgt_emb, encoder_output,
                                                  tgt_mask=tgt_mask, memory_mask=memory_mask,
                                                  tgt_key_padding_mask=tgt_key_padding_mask,
                                                  memory_key_padding_mask=memory_key_padding_mask)
        # Only the last decoder position is scored: the model predicts the single
        # next character for a given target prefix, so no causal mask is needed.
        return self.output_layer(decoder_output[:, -1, :])
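# Standard sinusoidal positional encoding from "Attention Is All You Need"
# (Vaswani et al., 2017):
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))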
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)  # shape (1, max_len, d_model) to match batch_first inputs
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:, :x.size(1), :]
        return self.dropout(x)
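# Pads every batch to its longest sequence. A minimal usage sketch (the values
# are made up for illustration):
#   batch = [(torch.Tensor([1, 2]), torch.Tensor([3])),
#            (torch.Tensor([4]), torch.Tensor([5, 6, 7]))]
#   inputs, targets = collate_fn(batch)  # shapes (2, 2) and (2, 3)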
def collate_fn(batch):
    inputs = [item[0].to(device) for item in batch]
    targets = [item[1].to(device) for item in batch]
    inputs = pad_sequence(inputs, batch_first=True, padding_value=padding)
    targets = pad_sequence(targets, batch_first=True, padding_value=padding)
    return inputs, targets
train_loader = DataLoader(MyDataset, batch_size=32, shuffle=True, collate_fn=collate_fn)
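# vocab_size is len(tokens) + 1 so that the extra index len(tokens) can serve
# both as padding_idx in the embedding and as the end-of-sequence class.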
model = TransformerModel(vocab_size=len(tokens)+1, emb_dim=16, nhead=4, num_encoder_layers=2, num_decoder_layers=2, dim_feedforward=256)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
epochs = 100
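# Resume from a previous checkpoint if one exists.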
try:
    model.load_state_dict(torch.load("data/PrateritumGPT.pth"))
    print("Successfully loaded model.")
except FileNotFoundError:
    pass  # no saved checkpoint yet; start from random weights
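# Greedy decoding demo: seed the decoder with the verb's first character and
# repeatedly append the argmax token until the padding/end-of-sequence id.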
inp = input("Which verb? ")
src = [[tokens.index(c) for c in inp]]
tgt = [[tokens.index(inp[0])]]  # decoder prompt: the first character
str_ = inp[0]
model.eval()  # disable dropout for generation
with torch.no_grad():
    for _ in range(100):
        out = model(torch.Tensor(src).to(device), torch.Tensor(tgt).to(device)).tolist()[0]
        Best_ = out.index(max(out))  # greedy argmax over the vocabulary
        if Best_ == len(tokens):  # padding id doubles as end-of-sequence
            break
        str_ += tokens[Best_]
        tgt[0] += [Best_]
model.train()
print(str_)
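# Training with teacher forcing: for each prefix length i, predict token i from
# targets[:, :i]; sequences that have reached padding are dropped from the batch.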
for epoch in range(epochs):
    total_loss = 0.0
    CreateBar()
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        inputs = inputs.to(device)    # .to() is not in-place; reassign the result
        targets = targets.to(device)
        for i in range(1, targets.shape[1]):
            optimizer.zero_grad()
            output = model(inputs, targets[:, :i])  # predict token i from the prefix
            loss = loss_fn(output, targets[:, i].long())
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
            # Keep only the sequences whose current position is a real token.
            mask = targets[:, i] != len(tokens)
            targets = targets[mask]
            inputs = inputs[mask]
        bar.update((batch_idx + 1) / len(train_loader) * 100)
    bar.finish()
    print(f"Epoch {epoch + 1}/{epochs}, Loss: {total_loss / len(train_loader)}")
    torch.save(model.state_dict(), "data/PrateritumGPT.pth")