db committed on
Commit 5ad97cb
1 Parent(s): 873dfeb
Files changed (2)
  1. model.py +369 -0
  2. sample.py +89 -0
model.py ADDED
@@ -0,0 +1,369 @@
+ """
+ Full definition of a GPT Language Model, all of it in this single file.
+ References:
+ 1) the official GPT-2 TensorFlow implementation released by OpenAI:
+ https://github.com/openai/gpt-2/blob/master/src/model.py
+ 2) huggingface/transformers PyTorch implementation:
+ https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py
+ """
+
+ import math
+ import inspect
+ from dataclasses import dataclass
+
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+
+ # @torch.jit.script # good to enable when not using torch.compile, disable when using (our default)
+ def new_gelu(x):
+     """
+     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT).
+     Reference: Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
+     """
+     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
+
+ class LayerNorm(nn.Module):
+     """ LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False """
+
+     def __init__(self, ndim, bias):
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(ndim))
+         self.bias = nn.Parameter(torch.zeros(ndim)) if bias else None
+
+     def forward(self, input):
+         return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
+
+ class CausalSelfAttention(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.n_embd % config.n_head == 0
+         # key, query, value projections for all heads, but in a batch
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
+         # output projection
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
+         # regularization
+         self.attn_dropout = nn.Dropout(config.dropout)
+         self.resid_dropout = nn.Dropout(config.dropout)
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.dropout = config.dropout
+         # flash attention make GPU go brrrrr but support is only in PyTorch >= 2.0
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention')
+         if not self.flash:
+             print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+             # causal mask to ensure that attention is only applied to the left in the input sequence
+             self.register_buffer("bias", torch.tril(torch.ones(config.block_size, config.block_size))
+                                         .view(1, 1, config.block_size, config.block_size))
+
+     def forward(self, x):
+         B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)
+
+         # calculate query, key, values for all heads in batch and move head forward to be the batch dim
+         q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
+         k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+         v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
+
+         # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
+         if self.flash:
+             # efficient attention using Flash Attention CUDA kernels
+             y = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=self.dropout if self.training else 0, is_causal=True)
+         else:
+             # manual implementation of attention
+             att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
+             att = att.masked_fill(self.bias[:,:,:T,:T] == 0, float('-inf'))
+             att = F.softmax(att, dim=-1)
+             att = self.attn_dropout(att)
+             y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
+         y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
+
+         # output projection
+         y = self.resid_dropout(self.c_proj(y))
+         return y
+
+ class MLP(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=config.bias)
+         self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=config.bias)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x):
+         x = self.c_fc(x)
+         x = new_gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+ class Block(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
+         self.attn = CausalSelfAttention(config)
+         self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
+         self.mlp = MLP(config)
+
+     def forward(self, x):
+         x = x + self.attn(self.ln_1(x))
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+ @dataclass
+ class GPTConfig:
+     block_size: int = 1024
+     vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency
+     n_layer: int = 12
+     n_head: int = 12
+     n_embd: int = 768
+     dropout: float = 0.0
+     bias: bool = True # True: bias in Linears and LayerNorms, like GPT-2. False: a bit better and faster
+
+ class GPT(nn.Module):
+
+     def __init__(self, config):
+         super().__init__()
+         assert config.vocab_size is not None
+         assert config.block_size is not None
+         self.config = config
+
+         self.transformer = nn.ModuleDict(dict(
+             wte = nn.Embedding(config.vocab_size, config.n_embd),
+             wpe = nn.Embedding(config.block_size, config.n_embd),
+             drop = nn.Dropout(config.dropout),
+             h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
+             ln_f = LayerNorm(config.n_embd, bias=config.bias),
+         ))
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+         # with weight tying when using torch.compile() some warnings get generated:
+         # "UserWarning: functional_call was passed multiple values for tied weights.
+         # This behavior is deprecated and will be an error in future versions"
+         # not 100% sure what this is, so far seems to be harmless. TODO investigate
+         self.transformer.wte.weight = self.lm_head.weight # https://paperswithcode.com/method/weight-tying
+
+         # init all weights
+         self.apply(self._init_weights)
+         # apply special scaled init to the residual projections, per GPT-2 paper
+         for pn, p in self.named_parameters():
+             if pn.endswith('c_proj.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02/math.sqrt(2 * config.n_layer))
+
+         # report number of parameters
+         print("number of parameters: %.2fM" % (self.get_num_params()/1e6,))
+
+     def get_num_params(self, non_embedding=True):
+         """
+         Return the number of parameters in the model.
+         For non-embedding count (default), the position embeddings get subtracted.
+         The token embeddings would too, except due to the parameter sharing these
+         params are actually used as weights in the final layer, so we include them.
+         """
+         n_params = sum(p.numel() for p in self.parameters())
+         if non_embedding:
+             n_params -= self.transformer.wpe.weight.numel()
+         return n_params
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, idx, targets=None):
+         device = idx.device
+         b, t = idx.size()
+         assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"
+         pos = torch.arange(0, t, dtype=torch.long, device=device).unsqueeze(0) # shape (1, t)
+
+         # forward the GPT model itself
+         tok_emb = self.transformer.wte(idx) # token embeddings of shape (b, t, n_embd)
+         pos_emb = self.transformer.wpe(pos) # position embeddings of shape (1, t, n_embd)
+         x = self.transformer.drop(tok_emb + pos_emb)
+         for block in self.transformer.h:
+             x = block(x)
+         x = self.transformer.ln_f(x)
+
+         if targets is not None:
+             # if we are given some desired targets also calculate the loss
+             logits = self.lm_head(x)
+             loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the lm_head on the very last position
+             logits = self.lm_head(x[:, [-1], :]) # note: using list [-1] to preserve the time dim
+             loss = None
+
+         return logits, loss
+
+     def crop_block_size(self, block_size):
+         # model surgery to decrease the block size if necessary
+         # e.g. we may load the GPT2 pretrained model checkpoint (block size 1024)
+         # but want to use a smaller block size for some smaller, simpler model
+         assert block_size <= self.config.block_size
+         self.config.block_size = block_size
+         self.transformer.wpe.weight = nn.Parameter(self.transformer.wpe.weight[:block_size])
+         for block in self.transformer.h:
+             if hasattr(block.attn, 'bias'):
+                 block.attn.bias = block.attn.bias[:,:,:block_size,:block_size]
+
+     @classmethod
+     def from_pretrained(cls, model_type, override_args=None):
+         assert model_type in {'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'}
+         override_args = override_args or {} # default to empty dict
+         # only dropout can be overridden see more notes below
+         assert all(k == 'dropout' for k in override_args)
+         from transformers import GPT2LMHeadModel
+         print("loading weights from pretrained gpt: %s" % model_type)
+
+         # n_layer, n_head and n_embd are determined from model_type
+         config_args = {
+             'gpt2': dict(n_layer=12, n_head=12, n_embd=768), # 124M params
+             'gpt2-medium': dict(n_layer=24, n_head=16, n_embd=1024), # 350M params
+             'gpt2-large': dict(n_layer=36, n_head=20, n_embd=1280), # 774M params
+             'gpt2-xl': dict(n_layer=48, n_head=25, n_embd=1600), # 1558M params
+         }[model_type]
+         print("forcing vocab_size=50257, block_size=1024, bias=True")
+         config_args['vocab_size'] = 50257 # always 50257 for GPT model checkpoints
+         config_args['block_size'] = 1024 # always 1024 for GPT model checkpoints
+         config_args['bias'] = True # always True for GPT model checkpoints
+         # we can override the dropout rate, if desired
+         if 'dropout' in override_args:
+             print(f"overriding dropout rate to {override_args['dropout']}")
+             config_args['dropout'] = override_args['dropout']
+         # create a from-scratch initialized minGPT model
+         config = GPTConfig(**config_args)
+         model = GPT(config)
+         sd = model.state_dict()
+         sd_keys = sd.keys()
+         sd_keys = [k for k in sd_keys if not k.endswith('.attn.bias')] # discard this mask / buffer, not a param
+
+         # init a huggingface/transformers model
+         model_hf = GPT2LMHeadModel.from_pretrained(model_type)
+         sd_hf = model_hf.state_dict()
+
+         # copy while ensuring all of the parameters are aligned and match in names and shapes
+         sd_keys_hf = sd_hf.keys()
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.masked_bias')] # ignore these, just a buffer
+         sd_keys_hf = [k for k in sd_keys_hf if not k.endswith('.attn.bias')] # same, just the mask (buffer)
+         transposed = ['attn.c_attn.weight', 'attn.c_proj.weight', 'mlp.c_fc.weight', 'mlp.c_proj.weight']
+         # basically the openai checkpoints use a "Conv1D" module, but we only want to use a vanilla Linear
+         # this means that we have to transpose these weights when we import them
+         assert len(sd_keys_hf) == len(sd_keys), f"mismatched keys: {len(sd_keys_hf)} != {len(sd_keys)}"
+         for k in sd_keys_hf:
+             if any(k.endswith(w) for w in transposed):
+                 # special treatment for the Conv1D weights we need to transpose
+                 assert sd_hf[k].shape[::-1] == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k].t())
+             else:
+                 # vanilla copy over the other parameters
+                 assert sd_hf[k].shape == sd[k].shape
+                 with torch.no_grad():
+                     sd[k].copy_(sd_hf[k])
+
+         return model
+
+     def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
+         """
+         This long function is unfortunately doing something very simple and is being very defensive:
+         We are separating out all parameters of the model into two buckets: those that will experience
+         weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
+         We are then returning the PyTorch optimizer object.
+         """
+
+         # separate out all parameters to those that will and won't experience regularizing weight decay
+         decay = set()
+         no_decay = set()
+         whitelist_weight_modules = (torch.nn.Linear, )
+         blacklist_weight_modules = (torch.nn.LayerNorm, LayerNorm, torch.nn.Embedding)
+         for mn, m in self.named_modules():
+             for pn, p in m.named_parameters():
+                 fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
+                 # random note: because named_modules and named_parameters are recursive
+                 # we will see the same tensors p many many times. but doing it this way
+                 # allows us to know which parent module any tensor p belongs to...
+                 if pn.endswith('bias'):
+                     # all biases will not be decayed
+                     no_decay.add(fpn)
+                 elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
+                     # weights of whitelist modules will be weight decayed
+                     decay.add(fpn)
+                 elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
+                     # weights of blacklist modules will NOT be weight decayed
+                     no_decay.add(fpn)
+
+         # subtle: 'transformer.wte.weight' and 'lm_head.weight' are tied, so they
+         # will appear in the no_decay and decay sets respectively after the above.
+         # In addition, because named_parameters() doesn't return duplicates, it
+         # will only return the first occurence, key'd by 'transformer.wte.weight', below.
+         # so let's manually remove 'lm_head.weight' from decay set. This will include
+         # this tensor into optimization via transformer.wte.weight only, and not decayed.
+         decay.remove('lm_head.weight')
+
+         # validate that we considered every parameter
+         param_dict = {pn: p for pn, p in self.named_parameters()}
+         inter_params = decay & no_decay
+         union_params = decay | no_decay
+         assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
+         assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
+                                                     % (str(param_dict.keys() - union_params), )
+
+         # create the pytorch optimizer object
+         optim_groups = [
+             {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
+             {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
+         ]
+         # new PyTorch nightly has a new 'fused' option for AdamW that is much faster
+         use_fused = (device_type == 'cuda') and ('fused' in inspect.signature(torch.optim.AdamW).parameters)
+         print(f"using fused AdamW: {use_fused}")
+         extra_args = dict(fused=True) if use_fused else dict()
+         optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
+
+         return optimizer
+
+     def estimate_mfu(self, fwdbwd_per_iter, dt):
+         """ estimate model flops utilization (MFU) in units of A100 bfloat16 peak FLOPS """
+         # first estimate the number of flops we do per iteration.
+         # see PaLM paper Appendix B as ref: https://arxiv.org/abs/2204.02311
+         N = self.get_num_params()
+         cfg = self.config
+         L, H, Q, T = cfg.n_layer, cfg.n_head, cfg.n_embd//cfg.n_head, cfg.block_size
+         flops_per_token = 6*N + 12*L*H*Q*T
+         flops_per_fwdbwd = flops_per_token * T
+         flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
+         # express our flops throughput as ratio of A100 bfloat16 peak flops
+         flops_achieved = flops_per_iter * (1.0/dt) # per second
+         flops_promised = 312e12 # A100 GPU bfloat16 peak flops is 312 TFLOPS
+         mfu = flops_achieved / flops_promised
+         return mfu
+
+     @torch.no_grad()
+     def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
+         """
+         Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete
+         the sequence max_new_tokens times, feeding the predictions back into the model each time.
+         Most likely you'll want to make sure to be in model.eval() mode of operation for this.
+         """
+         for _ in range(max_new_tokens):
+             # if the sequence context is growing too long we must crop it at block_size
+             idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
+             # forward the model to get the logits for the index in the sequence
+             logits, _ = self(idx_cond)
+             # pluck the logits at the final step and scale by desired temperature
+             logits = logits[:, -1, :] / temperature
+             # optionally crop the logits to only the top k options
+             if top_k is not None:
+                 v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
+                 logits[logits < v[:, [-1]]] = -float('Inf')
+             # apply softmax to convert logits to (normalized) probabilities
+             probs = F.softmax(logits, dim=-1)
+             # sample from the distribution
+             idx_next = torch.multinomial(probs, num_samples=1)
+             # append sampled index to the running sequence and continue
+             idx = torch.cat((idx, idx_next), dim=1)
+
+         return idx
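
For orientation, here is a minimal, hypothetical sketch of how the classes added above fit together. The tiny config values and random token ids are illustrative only and are not part of this commit; the dataclass defaults above correspond to the GPT-2 124M configuration.

import torch
from model import GPT, GPTConfig

# tiny illustrative config (not from this commit); defaults above are GPT-2 124M sized
config = GPTConfig(block_size=64, vocab_size=100, n_layer=2, n_head=2, n_embd=64, dropout=0.0, bias=True)
model = GPT(config)

idx = torch.randint(0, config.vocab_size, (2, 16))       # (batch, time) token ids
targets = torch.randint(0, config.vocab_size, (2, 16))   # next-token targets, same shape
logits, loss = model(idx, targets)                       # logits: (2, 16, 100), loss: scalar cross-entropy
out = model.generate(idx, max_new_tokens=8, temperature=1.0, top_k=10)  # (2, 24) token ids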
sample.py ADDED
@@ -0,0 +1,89 @@
+ """
+ Sample from a trained model
+ """
+ import os
+ import pickle
+ from contextlib import nullcontext
+ import torch
+ import tiktoken
+ from model import GPTConfig, GPT
+
+ # -----------------------------------------------------------------------------
+ init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
+ out_dir = 'out' # ignored if init_from is not 'resume'
+ start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
+ num_samples = 10 # number of samples to draw
+ max_new_tokens = 500 # number of tokens generated in each sample
+ temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
+ top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
+ seed = 1337
+ device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
+ dtype = 'bfloat16' # 'float32' or 'bfloat16' or 'float16'
+ compile = False # use PyTorch 2.0 to compile the model to be faster
+ exec(open('configurator.py').read()) # overrides from command line or config file
+ # -----------------------------------------------------------------------------
+
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed(seed)
+ torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
+ torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
+ device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
+ ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
+ ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
+
+ # model
+ if init_from == 'resume':
+     # init from a model saved in a specific directory
+     ckpt_path = os.path.join(out_dir, 'ckpt.pt')
+     checkpoint = torch.load(ckpt_path, map_location=device)
+     gptconf = GPTConfig(**checkpoint['model_args'])
+     model = GPT(gptconf)
+     state_dict = checkpoint['model']
+     unwanted_prefix = '_orig_mod.'
+     for k,v in list(state_dict.items()):
+         if k.startswith(unwanted_prefix):
+             state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
+     model.load_state_dict(state_dict)
+ elif init_from.startswith('gpt2'):
+     # init from a given GPT-2 model
+     model = GPT.from_pretrained(init_from, dict(dropout=0.0))
+
+ model.eval()
+ model.to(device)
+ if compile:
+     model = torch.compile(model) # requires PyTorch 2.0 (optional)
+
+ # look for the meta pickle in case it is available in the dataset folder
+ load_meta = False
+ if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these...
+     meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
+     load_meta = os.path.exists(meta_path)
+ if load_meta:
+     print(f"Loading meta from {meta_path}...")
+     with open(meta_path, 'rb') as f:
+         meta = pickle.load(f)
+     # TODO want to make this more general to arbitrary encoder/decoder schemes
+     stoi, itos = meta['stoi'], meta['itos']
+     encode = lambda s: [stoi[c] for c in s]
+     decode = lambda l: ''.join([itos[i] for i in l])
+ else:
+     # ok let's assume gpt-2 encodings by default
+     print("No meta.pkl found, assuming GPT-2 encodings...")
+     enc = tiktoken.get_encoding("gpt2")
+     encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
+     decode = lambda l: enc.decode(l)
+
+ # encode the beginning of the prompt
+ if start.startswith('FILE:'):
+     with open(start[5:], 'r', encoding='utf-8') as f:
+         start = f.read()
+ start_ids = encode(start)
+ x = (torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...])
+
+ # run generation
+ with torch.no_grad():
+     with ctx:
+         for k in range(num_samples):
+             y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
+             print(decode(y[0].tolist()))
+             print('---------------')
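
A minimal, self-contained variant of the sampling flow above, skipping the checkpoint and meta.pkl handling and hard-coding the pretrained GPT-2 path. This snippet is illustrative and not part of the commit; it assumes the transformers and tiktoken packages are installed (configurator.py, used by sample.py for overrides, is not needed here).

import torch
import tiktoken
from model import GPT

device = 'cpu'  # illustrative; sample.py defaults to 'cuda'
model = GPT.from_pretrained('gpt2', dict(dropout=0.0))  # downloads the 124M GPT-2 weights via transformers
model.eval()
model.to(device)

enc = tiktoken.get_encoding("gpt2")
start_ids = enc.encode("Hello, my name is", allowed_special={"<|endoftext|>"})
x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]  # (1, t) conditioning tokens

with torch.no_grad():
    y = model.generate(x, max_new_tokens=20, temperature=0.8, top_k=200)
print(enc.decode(y[0].tolist()))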