jackal1586
committed on
Commit 24de675
1 Parent(s): a4273a3
ckpt
Files changed:
- code-mt5.log +0 -0
- flax_model.msgpack +1 -1
- log_eval.py +92 -0
code-mt5.log
ADDED
The diff for this file is too large to render.
flax_model.msgpack
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cb281ba98bce354df2dfefcda04cc61aec662cc3bee18236ec270c34f4c24542
 size 965944223
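For reference, a minimal sketch (not part of this commit) of how a locally downloaded flax_model.msgpack could be checked against the updated Git LFS pointer; the expected oid and size are taken from the "+" lines above:

import hashlib
import os

EXPECTED_OID = "cb281ba98bce354df2dfefcda04cc61aec662cc3bee18236ec270c34f4c24542"
EXPECTED_SIZE = 965944223

def verify_lfs_pointer(path="flax_model.msgpack"):
    # Compare the file's size and SHA-256 digest to the pointer values.
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return os.path.getsize(path) == EXPECTED_SIZE and sha.hexdigest() == EXPECTED_OID

print("pointer matches file:", verify_lfs_pointer())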
log_eval.py
ADDED
@@ -0,0 +1,92 @@
+# To add a new cell, type '# %%'
+# To add a new markdown cell, type '# %% [markdown]'
+# %%
+from IPython import get_ipython
+
+# %%
+# get_ipython().system("ls -l ../logs")
+
+
+# %%
+# get_ipython().system(" cat ../logs/model_big.log")
+
+
+# %%
+# Parse the training log. The token indexing below assumes lines of the form
+#   Step... (<step> | Loss: <loss>, Learning Rate: <lr>)  for training metrics
+#   Step... (<step> | Loss: <loss>, Acc: <accuracy>)      for eval metrics
+path = "code-mt5.log"
+losses = []
+steps = []
+eval_steps = []
+eval_losses = []
+eval_accs = []
+learning_rate = []
+with open(path, "r") as filePtr:
+    for line in filePtr:
+        print(line)
+        toks = line.split()
+        # Skip blank lines before indexing toks[0].
+        if toks and toks[0] == "Step...":
+            if "Learning" in toks:
+                losses.append(float(toks[4].split(",")[0]))
+                steps.append(int(toks[1].split("(")[1]))
+                learning_rate.append(float(toks[-1].split(")")[0]))
+            if "Acc:" in toks:
+                eval_steps.append(int(toks[1].split("(")[1]))
+                eval_losses.append(float(toks[4].split(",")[0]))
+                eval_accs.append(float(toks[-1].split(")")[0]))
+
+
+# %%
+import matplotlib.pyplot as plt
+
+# %%
+# print(losses)
+# print(steps)
+
+
+# %%
+# Assumes the training loss is logged every 100 steps.
+print("Steps done: ", len(losses) * 100)
+
+
+# %%
+print("last 30 losses: ", losses[-30:])
+
+
+# %%
+plt.plot(steps, losses)
+plt.show()
+
+
+# %%
+# Lowest training loss and the step at which it occurred.
+min_loss, at_step = 1e10, None
+for step, loss in zip(steps, losses):
+    if loss < min_loss:
+        min_loss = loss
+        at_step = step
+
+print("min loss: {} at step {}".format(min_loss, at_step))
+
+
+# %%
+print(eval_losses)
+
+
+# %%
+plt.plot(eval_steps, eval_losses)
+plt.show()
+
+
+# %%
+print(eval_accs)
+
+
+# %%
+plt.plot(eval_steps, eval_accs)
+plt.show()
+
+
+# %%
+plt.plot(steps, learning_rate)
+plt.show()
+
+
+# %%
+
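For reference, a quick self-check of the line format log_eval.py appears to assume; the sample lines below are illustrative placeholders, not entries taken from code-mt5.log:

# Hypothetical log lines matching the token layout the parser above expects.
train_line = "Step... (1000 | Loss: 2.3456, Learning Rate: 0.000987)"
eval_line = "Step... (1000 | Loss: 2.1234, Acc: 0.5678)"

toks = train_line.split()
assert toks[0] == "Step..." and "Learning" in toks
print(float(toks[4].split(",")[0]))   # loss          -> 2.3456
print(int(toks[1].split("(")[1]))     # step          -> 1000
print(float(toks[-1].split(")")[0]))  # learning rate -> 0.000987

toks = eval_line.split()
assert "Acc:" in toks
print(float(toks[4].split(",")[0]))   # eval loss -> 2.1234
print(float(toks[-1].split(")")[0]))  # eval acc  -> 0.5678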