Update README.md
README.md
CHANGED
@@ -26,7 +26,7 @@ Original [Dataset](https://huggingface.co/datasets/roneneldan/TinyStories) conta
 
 ## Model description
 
-A very very small model (
+A very very small model (8M params) trained on a very small dataset
 
 A [sample inference script](https://huggingface.co/Norod78/TinyStories-3M-val-Hebrew/blob/main/TinyStories-3M-val-Hebrew-inference.py) is available
 
@@ -47,4 +47,31 @@ The following hyperparameters were used during training:
 - Transformers 4.31.0.dev0
 - Pytorch 2.0.0
 - Datasets 2.13.1
-- Tokenizers 0.13.3
+- Tokenizers 0.13.3
+
+### Parameter calculation
+
+```
+def gpt_params(seq_len, vocab_size, d_model, num_heads, num_layers):
+    """ Given GPT config calculate total number of parameters """
+    ffw_size = 4*d_model # in GPT the number of intermediate features is always 4*d_model
+    # token and position embeddings
+    embeddings = d_model * vocab_size + d_model * seq_len
+    # transformer blocks
+    attention = 3*d_model**2 + 3*d_model # weights and biases
+    attproj = d_model**2 + d_model
+    ffw = d_model*(ffw_size) + ffw_size
+    ffwproj = ffw_size*d_model + d_model
+    layernorms = 2*2*d_model
+    # dense
+    ln_f = 2*d_model
+    dense = d_model*vocab_size # note: no bias here
+    # note: embeddings are not included in the param count!
+    total_params = num_layers*(attention + attproj + ffw + ffwproj + layernorms) + ln_f + dense
+    return total_params
+
+#gpt2 = dict(seq_len = 1024, vocab_size = 50257, d_model = 768, num_heads = 12, num_layers = 12)
+gpt2 = dict(seq_len = 256, vocab_size = 50259, d_model = 128, num_heads = 16, num_layers = 8)
+result = gpt_params(**gpt2)/1e6
+print(result) # Prints 8.019584
+```
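As a sanity check on the 8.019584M figure, the same number can be reproduced by instantiating an equivalently shaped `transformers` model and counting its parameters directly. The sketch below is added for illustration and is not part of the original card: it assumes the GPT-2 layout that the formula models (`GPT2Config`/`GPT2LMHeadModel`, tied output head); the actual checkpoint may be a GPT-Neo variant that drops some attention biases, so treat it as an approximate cross-check.

```
# Illustrative cross-check (assumption: GPT-2 layout, as modeled by gpt_params above).
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(vocab_size=50259, n_positions=256, n_embd=128, n_layer=8, n_head=16)
model = GPT2LMHeadModel(config)

total = sum(p.numel() for p in model.parameters())
print(total / 1e6)  # 8.052352 -- includes token/position embeddings; lm_head is weight-tied to wte

# gpt_params() excludes both embedding tables but counts an (untied) output head of the
# same size as the token table, so it equals the torch count minus position embeddings:
print((total - config.n_positions * config.n_embd) / 1e6)  # 8.019584
```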
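The linked sample inference script remains the authoritative usage example; a minimal sketch of the same idea with the standard `transformers` text-generation pipeline might look like the following (the repo id is taken from the script link above, and the prompt and sampling settings are arbitrary illustrations):

```
from transformers import pipeline

# Repo id taken from the sample-script link; which checkpoint to load is an assumption here.
generator = pipeline("text-generation", model="Norod78/TinyStories-3M-val-Hebrew")

# Hebrew prompt meaning "Once upon a time"; sampling settings are illustrative only.
out = generator("פעם אחת", max_new_tokens=100, do_sample=True, top_p=0.95)
print(out[0]["generated_text"])
```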