Commit 031ac83
Parent(s): 92f14e0
believe to be performing backprop with the weights without error
- neural_network/activation.py +4 -12
- neural_network/backprop.py +35 -2
- neural_network/forwardprop.py +0 -6
- neural_network/main.py +10 -8
- requirements.txt +1 -0
neural_network/activation.py
CHANGED
@@ -1,23 +1,15 @@
 import numpy as np
 
 
-
 def sigmoid(x: float) -> float:
     return 1.0 / (1.0 + np.exp(-x))
 
 def sigmoid_prime(x: float) -> float:
     return sigmoid(x) * (1.0 - sigmoid(x))
 
-def relu(x: float) -> float:
-    """
-    returns the input if > 0
-    """
-    return max(0.0, x)
+def relu(x):
+    return np.maximum(x, 0)
 
-def relu_prime(x: float) -> float:
-    """
-    returns 1 if input is +
-    returns 0 if input is -
-    """
-    return 1 if x > 0 else 0
+def relu_prime(x):
+    return np.where(x > 0, 1, 0)
 
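For reference, the identity `sigmoid_prime` implements is sigma'(x) = sigma(x) * (1 - sigma(x)), and the vectorized `relu`/`relu_prime` pair is a 0/1 step. A standalone spot-check of the sigmoid derivative against a central finite difference (a verification sketch, not part of the commit):

import numpy as np

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_prime(x: float) -> float:
    return sigmoid(x) * (1.0 - sigmoid(x))

# the analytic derivative should agree with a central finite difference
eps = 1e-6
for x in (-2.0, 0.0, 3.0):
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    assert abs(sigmoid_prime(x) - numeric) < 1e-6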
neural_network/backprop.py
CHANGED
@@ -1,7 +1,40 @@
 import numpy as np
 
+from neural_network.opts import activation
 
-
-
+
+def bp(X_train: np.array, y_train: np.array, wb: dict, args: dict):
+    epochs = args["epochs"]
+    func = activation[args["activation_func"]]["main"]
+    func_prime = activation[args["activation_func"]]["prime"]
+    w1, w2 = wb["W1"], wb["W2"]
+    b1, b2 = wb["b1"], wb["b2"]
+    lr = args["learning_rate"]
+
+    for e in range(epochs):
+        # forward prop
+        node1 = compute_node(X_train, w1, b1, func)
+        y_hat = compute_node(node1, w2, b2, func)
+        error = y_hat - y_train
+
+        # backprop
+        # right now this is just the weights,
+        # we should also update the biases
+        dw2 = np.dot(
+            node1.T,
+            error * func_prime(y_hat),
+        )
+        dw1 = np.dot(
+            X_train.T,
+            np.dot(error * func_prime(y_hat), w2.T) * func_prime(node1),
+        )
+
+        # update weights & biases
+        w1 -= lr * dw1
+        w2 -= lr * dw2
+
+
+def compute_node(X, w, b, func):
+    return func(np.dot(X, w) + b)
 
 
neural_network/forwardprop.py
DELETED
@@ -1,6 +0,0 @@
-import numpy as np
-
-
-def fp():
-    return
-
neural_network/main.py
CHANGED
@@ -1,7 +1,6 @@
-
-
-from neural_network.forwardprop import fp
+from sklearn.model_selection import train_test_split
 from neural_network.backprop import bp
+import numpy as np
 
 
 def get_args() -> dict:
@@ -35,13 +34,16 @@ def main(
     X: np.array,
     y: np.array,
 ) -> None:
+
     args = get_args()
     wb = init(X, y, args["hidden_size"])
+    X_train, X_test, y_train, y_test = train_test_split(
+        X,
+        y,
+        test_size=0.3,
+        random_state=8675309
+    )
 
-
-    fp()
-    bp()
+    results = bp(X_train, y_train, wb, args)
 
-    # update weights and biases
 
-    # print results
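`main` now captures `results = bp(X_train, y_train, wb, args)`, but `bp` as committed has no return statement, and `X_test`/`y_test` are not consumed yet. A minimal sketch of scoring the held-out split, assuming `bp` were changed to return the trained parameters as `{"W1": w1, "W2": w2, "b1": b1, "b2": b2}` (`evaluate` is a hypothetical helper):

import numpy as np

def evaluate(X_test, y_test, wb, func):
    # hypothetical: forward pass with the trained parameters, then MSE
    node1 = func(np.dot(X_test, wb["W1"]) + wb["b1"])
    y_hat = func(np.dot(node1, wb["W2"]) + wb["b2"])
    return np.mean((y_hat - y_test) ** 2)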
requirements.txt
CHANGED
@@ -1 +1,2 @@
 numpy==1.24.2
+scikit_learn==1.2.2