p208p2002 committed
Commit
881388e
·
1 Parent(s): 5fe780f
Files changed (2)
  1. app.py +107 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,107 @@
+ import pandas as pd
+ import numpy as np
+ import gradio as gr
+ import matplotlib.pyplot as plt
+
+ description = """
+ Minimizing L under the constraint FLOPs(N, D) = C.
+
+ The functions $N_{opt}(C)$ and $D_{opt}(C)$ describe the optimal allocation of a computational budget $C$.
+
+ We use the following notation:
+
+ • L – the cross-entropy loss in nats. Typically it is averaged over the tokens in a context, but in
+ some cases we report the loss for specific tokens within the context.
+
+ • N – the number of model parameters, excluding all vocabulary and positional embeddings
+
+ • D – the dataset size in tokens
+
+ • C ≈ 6ND – an estimate of the total non-embedding training compute
+
+ $$E=1.69,\\ A=406.4,\\ B=410.7,\\ \\alpha=0.34,\\ \\beta=0.28$$
+ $$C\\approx 6ND$$
+ $$L(N,D)=E+\\frac{A}{N^\\alpha}+\\frac{B}{D^\\beta}$$
+ $$N_{opt}(C),\\ D_{opt}(C)={\\arg\\min}_{N,D\\ \\text{s.t.}\\ \\mathrm{FLOPs}(N,D)=C}\\ L(N,D)$$
+
+ """
+
+ article = """
+ References
+ - [Training Compute-Optimal Large Language Models](https://arxiv.org/pdf/2203.15556.pdf)
+ - [Scaling Laws for Neural Language Models](https://arxiv.org/pdf/2001.08361.pdf)
+ - [karpathy/nanoGPT](https://github.com/karpathy/nanoGPT/blob/master/scaling_laws.ipynb)
+ """
+
+
+ def L(N, D):
+     """
+     Approximates the loss for a model with N parameters trained on D tokens,
+     per the Chinchilla paper's parametric fit.
+     """
+     E = 1.69  # irreducible loss: entropy of natural language (limit of an infinite model on infinite data)
+     A = 406.4
+     B = 410.7
+     alpha = 0.34
+     beta = 0.28
+     return A / (N ** alpha) + B / (D ** beta) + E
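+
+ # Sanity check: at Chinchilla scale (N = 70e9 parameters, D = 1.4e12 tokens) this
+ # evaluates to 1.69 + 406.4 / 70e9**0.34 + 410.7 / 1.4e12**0.28 ≈ 1.94 nats.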
+
+
+ def plot_pens(tflops_card, utilization, num_gpus, training_days):
+     fig = plt.figure()
+     tflops_card = float(tflops_card) * (10 ** 12)
+     utilization = float(utilization)
+     num_gpus = int(num_gpus)
+     training_days = float(training_days)
+
+     # target compute budget (usually known up front: how many GPUs go brrr for how many days)
+     c = tflops_card * num_gpus * 86400 * training_days * utilization
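+     # e.g. the defaults below (40 TFLOP/s, 1 card, 7 days, 0.25 utilization) give
+     # c = 40e12 * 1 * 86400 * 7 * 0.25 ≈ 6.05e18 FLOPs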
+
+     # sweep model sizes from 10M to 100B (64 log-spaced points, 16 per decade)
+     ns = 10 ** np.arange(7, 11, step=2 ** -4)
+     # using C = 6*N*D, solve for the D that maintains the compute budget c
+     ds = c / (6 * ns)
+     # evaluate the loss at each (N, D) pair
+     losses = L(ns, ds)
+     # find the argmin
+     best = np.argmin(losses)
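+     # cross-check: with D = c/(6N), minimizing L(N, c/(6N)) in closed form gives
+     # N_opt = (alpha*A / (beta*B)) ** (1 / (alpha + beta)) * (c/6) ** (beta / (alpha + beta)),
+     # i.e. N_opt grows like c**0.45, and the sweep's argmin should land near this value.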
+
+     best_model_size = f"{ns[best]/1e6:.2f}M"
+     best_dataset_size = f"{ds[best]/1e9:.2f}B"
+
+     # plot loss vs. model size, with a vertical bar at the compute-optimal size
+     plt.plot(ns, losses)
+     plt.xscale('log')
+     plt.axvline(ns[best], color='red')
+     plt.xlabel('model size')
+     plt.ylabel('loss')
+
+     return fig, c, round(losses[best], 3), best_model_size, best_dataset_size
+
+
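+ # With the defaults (c ≈ 6.05e18 FLOPs) the sweep should land around N ≈ 1.8e8
+ # (nearest grid point 10**8.25 ≈ 177.8M) and D = c/(6N) ≈ 5.7e9 tokens; these are
+ # back-of-the-envelope values from the closed form above, not exact app output.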
+ if __name__ == "__main__":
+     iface = gr.Interface(
+         fn=plot_pens,
+         layout='vertical',
+         inputs=[
+             gr.Textbox(label="TFLOP/s per Card", value="40"),
+             gr.Slider(label="System Utilization", minimum=0, maximum=1, step=0.01, value=0.25),
+             gr.Textbox(label="Number of Cards", value="1"),
+             gr.Textbox(label="Training Days", value="7")
+         ],
+         outputs=[
+             gr.Plot(label="Estimated Loss"),
+             gr.Label(label="Total Compute Budget"),
+             gr.Label(label="Estimated Final Loss"),
+             gr.Label(label="Optimal Model Size"),
+             gr.Label(label="Optimal Dataset Size")
+         ],
+         title="Compute-Optimal Model Estimator",
+         description=description,
+         article=article,
+         theme='peach',
+         live=False
+     ).launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ matplotlib
+ gradio
+ pandas
+ numpy