import torch
import torch.nn as nn
from torch.nn import functional as F
import random
import re
import gradio as gr

# hyperparameters
batch_size = 16 # how many independent sequences will we process in parallel?
block_size = 32 # what is the maximum context length for predictions?
max_iters = 5000
eval_interval = 100
learning_rate = 1e-3
device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_iters = 200
n_embd = 64
n_head = 4
n_layer = 4
dropout = 0.0
# ------------
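# Note: batch_size, max_iters, eval_interval, learning_rate, and eval_iters are
# training-time settings; this script only runs inference with pretrained checkpoints.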

torch.manual_seed(1337)

class Head(nn.Module):
    """ one head of self-attention """

    def __init__(self, head_size):
        super().__init__()
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        B,T,C = x.shape
        k = self.key(x)   # (B,T,head_size)
        q = self.query(x) # (B,T,head_size)
        # compute attention scores ("affinities"); the scale uses C = n_embd rather than
        # the textbook head_size**-0.5, and is left unchanged to match the pretrained weights
        wei = q @ k.transpose(-2,-1) * C**-0.5 # (B,T,head_size) @ (B,head_size,T) -> (B,T,T)
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # causal mask: (B,T,T)
        wei = F.softmax(wei, dim=-1) # (B,T,T)
        wei = self.dropout(wei)
        # perform the weighted aggregation of the values
        v = self.value(x) # (B,T,head_size)
        out = wei @ v # (B,T,T) @ (B,T,head_size) -> (B,T,head_size)
        return out
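
# Shape walk-through for one Head (illustrative sizes matching the config above):
# with B=1, T=4, n_embd=64, head_size=16, an input x of shape (1, 4, 64) yields
# k, q, v of shape (1, 4, 16); wei is (1, 4, 4) and lower-triangular after masking,
# so position t attends only to positions 0..t; out is (1, 4, 16).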

class MultiHeadAttention(nn.Module):
    """ multiple heads of self-attention in parallel """

    def __init__(self, num_heads, head_size):
        super().__init__()
        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
        self.proj = nn.Linear(n_embd, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.dropout(self.proj(out))
        return out
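
# Note: num_heads * head_size == n_embd here (4 * 16 = 64), so the concatenated head
# outputs are already n_embd wide and self.proj is a square n_embd -> n_embd projection.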

class FeedForward(nn.Module):
    """ a simple MLP: linear expansion, ReLU, linear projection back, dropout """

    def __init__(self, n_embd):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embd, 4 * n_embd),
            nn.ReLU(),
            nn.Linear(4 * n_embd, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)
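
# The 4x hidden expansion (n_embd -> 4*n_embd -> n_embd) mirrors the feed-forward
# sizing from the original Transformer paper ("Attention Is All You Need").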

class Block(nn.Module):
    """ Transformer block: communication followed by computation """

    def __init__(self, n_embd, n_head):
        # n_embd: embedding dimension, n_head: the number of heads we'd like
        super().__init__()
        head_size = n_embd // n_head
        self.sa = MultiHeadAttention(n_head, head_size)
        self.ffwd = FeedForward(n_embd)
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)

    def forward(self, x):
        x = x + self.sa(self.ln1(x))
        x = x + self.ffwd(self.ln2(x))
        return x
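
# Pre-norm residual arrangement: LayerNorm is applied before each sublayer
# (x = x + sublayer(ln(x))), the GPT-2-style variant of the original post-norm block.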

# character-level GPT model (despite the class name, this is a full Transformer, not a bigram model)
class BigramLanguageModel(nn.Module):
    def __init__(self, dataset_text, n_embd):
        super().__init__()

        # Compute character-related parameters
        self.chars = sorted(list(set(dataset_text)))
        self.vocab_size = len(self.chars)
        self.stoi = {ch: i for i, ch in enumerate(self.chars)}
        self.itos = {i: ch for ch, i in self.stoi.items()}

        self.token_embedding_table = nn.Embedding(self.vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
        self.ln_f = nn.LayerNorm(n_embd)
        self.lm_head = nn.Linear(n_embd, self.vocab_size)
        self.encode = lambda s: [self.stoi[c] for c in s] # encoder: take a string, output a list of integers
        self.decode = lambda l: ''.join([self.itos[i] for i in l]) # decoder: take a list of integers, output a string
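        # round trip: self.decode(self.encode(s)) == s for any string s whose
        # characters all appear in the training text (otherwise encode raises KeyError)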
    def forward(self, idx, targets=None):
        B, T = idx.shape

        # idx and targets are both (B,T) tensor of integers
        tok_emb = self.token_embedding_table(idx) # (B,T,C)
        pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
        x = tok_emb + pos_emb # (B,T,C)
        x = self.blocks(x) # (B,T,C)
        x = self.ln_f(x) # (B,T,C)
        logits = self.lm_head(x) # (B,T,vocab_size)

        if targets is None:
            loss = None
        else:
            B, T, C = logits.shape
            logits = logits.view(B*T, C)
            targets = targets.view(B*T)
            loss = F.cross_entropy(logits, targets)

        return logits, loss

    @torch.no_grad()
    def generate(self, idx, max_new_tokens):
        # idx is (B, T) array of indices in the current context
        for _ in range(max_new_tokens):
            # crop idx to the last block_size tokens
            idx_cond = idx[:, -block_size:]
            # get the predictions
            logits, loss = self(idx_cond)
            # focus only on the last time step
            logits = logits[:, -1, :] # becomes (B, C)
            # apply softmax to get probabilities
            probs = F.softmax(logits, dim=-1) # (B, C)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
            # append sampled index to the running sequence
            idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
        return idx
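
# Sketch of typical use (hypothetical snippet; the Gradio handlers below do the same):
#   ctx = torch.zeros((1, 1), dtype=torch.long, device=device)  # seed with token index 0
#   out = model.generate(ctx, max_new_tokens=100)[0].tolist()
#   text = model.decode(out)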

# Reading Shakespeare data
with open('input.txt', 'r', encoding='utf-8') as f:
    shakespeare_text = f.read()


# Reading Wikipedia data
DATA_PATH = 'wikisent2.txt'
# load wikipedia sentences
with open(DATA_PATH, 'r') as f:
    lines = f.read().splitlines()

# Sample 250k lines from the dataset (random.choices draws with replacement; the
# fixed seed keeps the draw reproducible so the vocabulary matches the trained checkpoint)
random.seed(42)
texts = random.choices(lines, k=250000)
del lines

def preprocess(text):
    text = re.sub(r'@.*?\s+', '', text)  # Remove @mentions (up to the next whitespace)
    text = re.sub(r'#.*?\s+', '', text)  # Remove hashtags
    text = re.sub(r'https?:\/\/.*[\r\n]*', '', text)  # Remove URLs
    text = re.sub(r"[^\w\s'.]", '', text)  # Remove special characters except single quotes and periods
    text = re.sub(r'\s+', ' ', text)  # Replace runs of whitespace with a single space
    text = re.sub(r'^\d+\s*|^\d+\.\d+\s*|^\d+\.\d+\.\d+\s*', '', text)  # Remove digits at the start of sentences
    text = text.strip()  # Remove leading and trailing whitespace
    return text
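
# e.g. preprocess("12 The   quick, brown fox! See https://example.com")
#      -> "The quick brown fox See"
# Note: the leading-number alternation tries ^\d+ first, so "1.2 Foo" becomes ".2 Foo";
# the cleaned text must match what the models saw at training time.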

wiki_text = [preprocess(t) for t in texts]
wiki_text = '\n'.join(wiki_text)

# Load the Shakespeare model
shakespeare_model = BigramLanguageModel(shakespeare_text, n_embd).to(device)  # initialize an instance of the model
shakespeare_model.load_state_dict(torch.load('shakespeaere_language_model.pth', map_location=device))
shakespeare_model.eval()  # set the model to evaluation mode

# Load the Wikipedia model
wikipedia_model = BigramLanguageModel(wiki_text, n_embd).to(device)  # initialize an instance of the model
wikipedia_model.load_state_dict(torch.load('wikipedia_language_model.pth', map_location=device))
wikipedia_model.eval()  # set the model to evaluation mode
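
# Each model is rebuilt from the same text used at training time because the vocabulary
# (and therefore the embedding and lm_head shapes) is derived from that text; a different
# character set would make load_state_dict fail on a size mismatch.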


def generate_shakespeare_outputs(prompt=None, max_new_tokens=2000):
    if prompt:
        context = torch.tensor(shakespeare_model.encode(prompt), dtype=torch.long, device=device).view(1, -1)
    else:
        context = torch.zeros((1, 1), dtype=torch.long, device=device)
    text_output = shakespeare_model.decode(shakespeare_model.generate(context, max_new_tokens=max_new_tokens)[0].tolist())
    return text_output


def generate_wikipedia_outputs(prompt=None, max_new_tokens=2000):
    if prompt:
        context = torch.tensor(wikipedia_model.encode(prompt), dtype=torch.long, device=device).view(1, -1)
    else:
        context = torch.zeros((1, 1), dtype=torch.long, device=device)
    text_output = wikipedia_model.decode(wikipedia_model.generate(context, max_new_tokens=max_new_tokens)[0].tolist())
    return text_output
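
# Optional smoke test outside the Gradio UI (prompts must only contain characters
# present in the corresponding training text, or encode() raises a KeyError):
#   print(generate_shakespeare_outputs("ROMEO:", max_new_tokens=200))
#   print(generate_wikipedia_outputs("James Bond", max_new_tokens=200))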
    

title = "Nano GPT"

description1 = "Nano GPT trained on the <a href='https://github.com/karpathy/char-rnn/blob/6f9487a6fe5b420b7ca9afb0d7c078e37c1d1b4e/data/tinyshakespeare/input.txt'>Tiny Shakespeare dataset</a>. It is trained on a very small amount of data to show how GPTs are built and trained. The implementation can be found <a href='https://github.com/karpathy/nanoGPT'>here</a>."

shakespeare_interface = gr.Interface(generate_shakespeare_outputs,
                    inputs=[gr.Textbox(label="Enter any prompt ", type="text", value="Once upon a time,"),
                            gr.Slider(minimum=100, maximum=5000, step=100, value=2000, label="Max new tokens")],
                    outputs=gr.Textbox(label="Output generated", type="text"), description=description1)

description2 = "Nano GPT trained on the <a href='https://www.kaggle.com/datasets/mikeortman/wikipedia-sentences'>Wikipedia sentences dataset</a>. It is trained on a very small amount of data to show how GPTs are built and trained. The implementation can be found <a href='https://github.com/karpathy/nanoGPT'>here</a>."

wiki_interface = gr.Interface(generate_wikipedia_outputs,
                    inputs=[gr.Textbox(label="Enter any prompt ", type="text", value="James Bond"),
                            gr.Slider(minimum=100, maximum=5000, step=100, value=2000, label="Max new tokens")],
                    outputs=gr.Textbox(label="Output generated", type="text"), description=description2)

demo = gr.TabbedInterface([shakespeare_interface, wiki_interface], tab_names=["Shakespeare Data", "Wikipedia Data"], 
                          title=title)


demo.launch()