Spaces: Sleeping

Jensen-holm committed • da3cde4
1 Parent(s): f308820

going to use this branch to remake this program in Go using the gota
package. Code will be cleaner and we will be able to add more features
more easily.
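The commit itself only adds an empty `main.go` (see below), so nothing gota-specific lands yet. As a rough sketch of the stated direction, loading a dataset into a gota DataFrame might look like the following; the `data/iris.csv` path and the printed summary are illustrative assumptions, not code from this commit.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/go-gota/gota/dataframe"
)

func main() {
	// Hypothetical CSV path; this commit does not add any data files.
	f, err := os.Open("data/iris.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ReadCSV parses the file into a DataFrame, inferring column types
	// from the header row and the values below it.
	df := dataframe.ReadCSV(f)

	rows, cols := df.Dims()
	fmt.Printf("loaded %d rows x %d columns: %v\n", rows, cols, df.Names())
	fmt.Println(df.Describe()) // per-column summary statistics
}
```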
- .gitignore +18 -126
- LICENSE +0 -21
- README.md +0 -52
- app.py +0 -44
- cluster/clusterer.py +0 -22
- cluster/distance.py +0 -19
- cluster/heirarchical.py +0 -0
- cluster/kmeans.py +0 -78
- cluster/kmedoids.py +0 -15
- cluster/main.py +0 -22
- cluster/opts.py +0 -8
- cluster/plot.py +0 -39
- dataset/iris.py +0 -14
- example/kmeans.py +0 -31
- example/kmedoids.py +0 -0
- example/neural_network.py +0 -27
- go.mod +9 -0
- go.sum +72 -0
- main.go +5 -0
- neural_network/activation.py +0 -26
- neural_network/backprop.py +0 -73
- neural_network/main.py +0 -51
- neural_network/neural_network.py +0 -86
- neural_network/opts.py +0 -18
- opts.py +0 -10
- requirements.txt +0 -8
.gitignore
CHANGED
@@ -1,133 +1,25 @@
-#
-
-
-
-
-
 *.so
 
-#
-
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
 
-#
-
 
-#
-#
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
 
-#
-
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-.idea
 
 .vscode
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
 *.so
+*.dylib
 
+# Test binary, built with `go test -c`
+*.test
 
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
 
+# Dependency directories (remove the comment below to include it)
+# vendor/
 
+# Go workspace file
+go.work
 
+.DS_Store
 .vscode
+.idea
LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 Jensen Holm
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
README.md
DELETED
@@ -1,52 +0,0 @@
-# Data Mining from scratch backend
-
-Currently living [here](https://data-mining-from-scratch-backend.onrender.com/) <br>
-Since the API is hosted using render's free tier, <br>
-every time 15 minutes goes by it gets shut down. <br>
-If a request is made while it is shut down, the web service <br>
-has to spin back up again which takes roughly 1 minute <br>
-
-### Example Useage
-
-```python
-import requests
-import json
-
-request_params = {
-    "arguments": {
-        "epochs": 100,
-        "activation_func": "tanh",
-        "hidden_size": 8,
-        "learning_rate": 0.01
-    }
-}
-
-headers = {
-    "Content-Type": "application/json",
-}
-
-r = requests.post(
-    "https://data-mining-from-scratch-backend.onrender.com/neural-network",
-    headers=headers,
-    data=json.dumps(request_params),
-)
-
-model_data = r.json()
-print(model_data)
-```
-
-### Parameter Options
-
-- End Points: <br>
-
-  -`"neural-network"` <br>
-
-  - `"kmeans-clustering"` <br> -`"kmedoid-clustering"` <br> -`"heirarchical-clustering"` <br>
-
-- Algorithm Specific Arguments
-
-  - neural-network
-    - epochs: any integer
-    - activation_func: tanh, sigmoid, or relu
-    - hidden_size: must be an even integer
-    - learning_rate: any floating point number
app.py
DELETED
@@ -1,44 +0,0 @@
-from flask import Flask, request, jsonify, render_template
-from flask_cors import CORS
-
-from dataset.iris import iris
-from opts import options
-
-# using the iris data set for every algorithm
-# just for simplicity sake
-X, y = iris()
-
-app = Flask(__name__)
-
-CORS(app, origins="*")
-
-
-@app.route("/neural-network", methods=["POST"])
-def neural_network():
-    algorithm = options["neural-network"]
-    args = request.json["arguments"]
-
-    result = algorithm(
-        X=X,
-        y=y,
-        args=args,
-    )
-    return jsonify(result)
-
-
-@app.route("/kmeans-clustering", methods=["POST"])
-def kmeans():
-    algorithm = options["kmeans-clustering"]
-    args = request.json["arguments"]
-
-    result = algorithm(
-        X=X,
-        y=y,
-        clusterer="kmeans-clustering",
-        args=args,
-    )
-    return jsonify(result)
-
-
-if __name__ == "__main__":
-    app.run(debug=False)
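The Flask routes deleted above have no Go replacement in this commit. Purely for orientation, a minimal `net/http` sketch of what the `/neural-network` endpoint could look like in the Go rewrite is shown here; the struct shape and port are assumptions, not part of the commit.

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// requestBody mirrors the JSON shape the Flask app accepted:
// {"arguments": {...}}. The struct name and field are assumptions.
type requestBody struct {
	Arguments map[string]interface{} `json:"arguments"`
}

func neuralNetworkHandler(w http.ResponseWriter, r *http.Request) {
	var body requestBody
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// The Go model does not exist yet; echo the parsed arguments back
	// so the handler shape is clear.
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{"arguments": body.Arguments})
}

func main() {
	http.HandleFunc("/neural-network", neuralNetworkHandler)
	log.Fatal(http.ListenAndServe(":5000", nil))
}
```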
cluster/clusterer.py
DELETED
@@ -1,22 +0,0 @@
-from dataclasses import dataclass
-from typing import Callable
-
-
-@dataclass
-class Clusterer:
-    cluster_func: Callable
-    plt_data = None
-
-    def eval(
-        self,
-        pred_labels,
-        true_labels,
-    ) -> None:
-        ...
-
-    def set_plot_data(self, plt_data):
-        self.plt_data = plt_data
-
-    @classmethod
-    def from_dict(cls, dct: dict):
-        return cls(**dct)
cluster/distance.py
DELETED
@@ -1,19 +0,0 @@
-import numpy as np
-
-# right now I am not using this function
-# maybe get rid of it or change it to how we
-# use it in our distance calculations
-
-def euclidean(
-    point: np.array,
-    data: np.array,
-):
-    """
-    Computed the euclidean distance
-    between a point and the rest
-    of the dataset
-    point dims: (m,)
-    data dims: (n, m)
-    output dims: (n,)
-    """
-    return np.sqrt(np.sum((point - data)**2), aixs=1)
cluster/heirarchical.py
DELETED
File without changes
cluster/kmeans.py
DELETED
@@ -1,78 +0,0 @@
-from dataclasses import dataclass
-import numpy as np
-
-from cluster.clusterer import Clusterer
-
-
-@dataclass
-class Kmeans(Clusterer):
-    k: int
-    max_iter: int
-    centroids = None
-    clusters = None
-
-    def build(
-        self,
-        X: np.array,
-    ) -> None:
-        # randomly initialize centroids
-        centroids = X[np.random.choice(
-            X.shape[0],
-            self.k,
-            replace=False,
-        )]
-
-        # Calculate Euclidean distance between each data point and each centroid
-        # then assign each point to its closest cluster
-        clusters = self.assign_clusters(X, centroids)
-        centroids = self.update_centroids(self.k, X, clusters)
-
-        while True:
-            new_clusts = self.assign_clusters(X, centroids)
-            if np.array_equal(new_clusts, clusters):
-                break
-            clusters = new_clusts
-            centroids = self.update_centroids(self.k, X, clusters)
-
-        self.clusters = clusters
-        self.centroids = centroids
-
-    @staticmethod
-    def assign_clusters(
-        X: np.array,
-        centroids: np.array,
-    ) -> np.array:
-        distances = np.sqrt(((X - centroids[:, np.newaxis])**2).sum(axis=2))
-        clusts = np.argmin(distances, axis=0)
-        return clusts
-
-    @staticmethod
-    def update_centroids(
-        k: int,
-        X: np.array,
-        clusters: np.array,
-    ) -> np.array:
-        centroids = np.zeros((k, X.shape[1]))
-        for i in range(k):
-            centroids[i] = X[clusters == i].mean(axis=0)
-        return centroids
-
-    def to_dict(
-        self,
-        X: np.array,
-    ) -> dict:
-        cluster_data = []
-        for i in range(self.k):
-            indices = np.where(self.clusters == i)[0]
-            cluster_pts = X[indices].tolist()
-            cluster_data.append({
-                "cluster_id": i,
-                "centroid": self.centroids[i].tolist(),
-                "points": cluster_pts,
-            })
-        return {
-            "k": self.k,
-            "max_iter": self.max_iter,
-            "clusters": cluster_data,
-            "plt_data": self.plt_data,
-        }
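The k-means core removed here (`assign_clusters` / `update_centroids`) will presumably be re-implemented in Go. The sketch below shows those two steps on plain slices (no gota or gonum); it is written for illustration and is not taken from the repository.

```go
package cluster

import "math"

// assignClusters mirrors the deleted Kmeans.assign_clusters: each point is
// labeled with the index of its nearest centroid (squared Euclidean distance).
func assignClusters(X, centroids [][]float64) []int {
	labels := make([]int, len(X))
	for i, p := range X {
		best, bestDist := 0, math.Inf(1)
		for c, cent := range centroids {
			d := 0.0
			for j := range p {
				diff := p[j] - cent[j]
				d += diff * diff
			}
			if d < bestDist {
				best, bestDist = c, d
			}
		}
		labels[i] = best
	}
	return labels
}

// updateCentroids mirrors the deleted Kmeans.update_centroids: each centroid
// becomes the mean of the points currently assigned to it.
func updateCentroids(k int, X [][]float64, labels []int) [][]float64 {
	dims := len(X[0])
	centroids := make([][]float64, k)
	counts := make([]int, k)
	for c := range centroids {
		centroids[c] = make([]float64, dims)
	}
	for i, p := range X {
		c := labels[i]
		counts[c]++
		for j, v := range p {
			centroids[c][j] += v
		}
	}
	for c := range centroids {
		if counts[c] == 0 {
			continue // empty cluster; the numpy version would yield NaNs here
		}
		for j := range centroids[c] {
			centroids[c][j] /= float64(counts[c])
		}
	}
	return centroids
}
```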
cluster/kmedoids.py
DELETED
@@ -1,15 +0,0 @@
-from dataclasses import dataclass
-import numpy as np
-
-from cluster.clusterer import Clusterer
-
-
-@dataclass
-class Kmedoids(Clusterer):
-    k: int
-
-    def main(self, X):
-        ...
-
-    def build(self, X: np.array):
-        ...
cluster/main.py
DELETED
@@ -1,22 +0,0 @@
-import numpy as np
-
-from cluster.clusterer import Clusterer
-from cluster.opts import clustering_methods
-from cluster.plot import plot
-
-
-def main(
-    X: np.array,
-    y: np.array,
-    clusterer: str,
-    args: dict,
-) -> dict:
-    cluster_alg: Clusterer = clustering_methods[clusterer]
-
-    args.update({"cluster_func": cluster_alg})
-    alg = cluster_alg.from_dict(args)
-
-    alg.build(X)
-    plt_data = plot(clusterer=alg, X=X)
-    alg.set_plot_data(plt_data)
-    return alg.to_dict(X)
cluster/opts.py
DELETED
@@ -1,8 +0,0 @@
-from cluster.kmedoids import Kmedoids
-from cluster.kmeans import Kmeans
-
-
-clustering_methods = {
-    "kmeans-clustering": Kmeans,
-    "kmedoids-clustering": Kmedoids,
-}
cluster/plot.py
DELETED
@@ -1,39 +0,0 @@
-import matplotlib
-import matplotlib.pyplot as plt
-import seaborn as sns
-import io
-
-
-matplotlib.use("Agg")
-sns.set()
-
-
-def plot(clusterer, X):
-    cluster_data = clusterer.to_dict(X)["clusters"]
-    fig, ax = plt.subplots(figsize=(8, 6))
-    for cluster in cluster_data:
-        sns.scatterplot(
-            x=[point[0] for point in cluster["points"]],
-            y=[point[1] for point in cluster["points"]],
-            label=f"Cluster {cluster['cluster_id']}",
-            ax=ax,
-        )
-        ax.scatter(
-            x=cluster["centroid"][0],
-            y=cluster["centroid"][1],
-            marker="x",
-            s=100,
-            linewidth=2,
-            color="red",
-        )
-    ax.legend()
-    ax.set_title("K-means Clustering")
-    ax.set_ylabel("Normalized Petal Length (cm)")
-    ax.set_xlabel("Normalized Petal Length (cm)")
-
-    # Save the plot to a BytesIO buffer
-    buffer = io.BytesIO()
-    plt.savefig(buffer, format='png')
-    buffer.seek(0)
-
-    return buffer.read()
dataset/iris.py
DELETED
@@ -1,14 +0,0 @@
-from sklearn.datasets import load_iris
-from sklearn.preprocessing import OneHotEncoder, StandardScaler
-
-def iris():
-    """
-    returns a tuple of numpy arrays containing the
-    iris dataset split into training and testing sets
-    after being normalized and one-hot encoded
-    """
-    iris = load_iris()
-    scaler = StandardScaler()
-    x = scaler.fit_transform(iris.data)
-    y = OneHotEncoder().fit_transform(iris.target.reshape(-1, 1)).toarray()
-    return x, y
example/kmeans.py
DELETED
@@ -1,31 +0,0 @@
-
-import json
-import requests
-import matplotlib.pyplot as plt
-import seaborn as sns
-
-
-ENDPOINT: str = "http://127.0.0.1:5000/"
-
-request_params = {
-    "arguments": {
-        "clusterer": "kmeans-clustering",
-        "k": 3,
-        "max_iter": 100,
-    },
-}
-
-
-headers = {
-    "Content-Type": "application/json",
-}
-
-r = requests.post(
-    ENDPOINT,
-    headers=headers,
-    data=json.dumps(request_params),
-)
-
-
-if __name__ == "__main__":
-    print(r.json())
example/kmedoids.py
DELETED
File without changes
example/neural_network.py
DELETED
@@ -1,27 +0,0 @@
-import requests
-import json
-
-
-ENDPOINT: str = "http://127.0.0.1:5000/neural-network"
-
-request_params = {
-    "arguments": {
-        "epochs": 100,
-        "activation_func": "tanh",
-        "hidden_size": 8,
-        "learning_rate": 0.01
-    }
-}
-
-headers = {
-    "Content-Type": "application/json",
-}
-
-r = requests.post(
-    ENDPOINT,
-    headers=headers,
-    data=json.dumps(request_params),
-)
-
-if __name__ == "__main__":
-    print(r.json()["plt_data"])
go.mod
ADDED
@@ -0,0 +1,9 @@
+module github.com/Jensen-holm/ml-from-scratch
+
+go 1.19
+
+require (
+	github.com/go-gota/gota v0.12.0 // indirect
+	golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 // indirect
+	gonum.org/v1/gonum v0.9.1 // indirect
+)
go.sum
ADDED
@@ -0,0 +1,72 @@
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gota/gota v0.12.0 h1:T5BDg1hTf5fZ/CO+T/N0E+DDqUhvoKBl+UVckgcAAQg=
+github.com/go-gota/gota v0.12.0/go.mod h1:UT+NsWpZC/FhaOyWb9Hui0jXg0Iq8e/YugZHTbyW/34=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 h1:0PC75Fz/kyMGhL0e1QnypqK2kQMqKt9csD1GnMJR+Zk=
+golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.1 h1:HCWmqqNoELL0RAQeKBXWtkp04mGk8koafcB4He6+uhc=
+gonum.org/v1/gonum v0.9.1/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
main.go
ADDED
@@ -0,0 +1,5 @@
+package main
+
+func main() {
+
+}
neural_network/activation.py
DELETED
@@ -1,26 +0,0 @@
-import numpy as np
-
-
-def relu(x):
-    return np.maximum(x, 0)
-
-
-def relu_prime(x):
-    return np.where(x > 0, 1, 0)
-
-
-def tanh(x):
-    return np.tanh(x)
-
-
-def tanh_prime(x):
-    return 1 - np.tanh(x) ** 2
-
-
-def sigmoid(x):
-    return 1.0 / (1.0 + np.exp(-x))
-
-
-def sigmoid_prime(x):
-    s = sigmoid(x)
-    return s / 1.0 - s
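For reference, scalar Go counterparts of the deleted activation functions might look like the sketch below; it is illustrative only (the numpy versions operate on whole arrays, not scalars). Note that the deleted `sigmoid_prime` returns `s / 1.0 - s`, which always evaluates to 0; the conventional derivative is `s * (1 - s)`, which the sketch uses instead.

```go
package activation

import "math"

// Scalar Go counterparts of the deleted numpy activation functions.
func ReLU(x float64) float64 { return math.Max(x, 0) }

func ReLUPrime(x float64) float64 {
	if x > 0 {
		return 1
	}
	return 0
}

func Tanh(x float64) float64 { return math.Tanh(x) }

func TanhPrime(x float64) float64 {
	t := math.Tanh(x)
	return 1 - t*t
}

func Sigmoid(x float64) float64 { return 1.0 / (1.0 + math.Exp(-x)) }

// SigmoidPrime uses the standard derivative s*(1-s); see the note above
// about the deleted Python implementation.
func SigmoidPrime(x float64) float64 {
	s := Sigmoid(x)
	return s * (1 - s)
}
```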
neural_network/backprop.py
DELETED
@@ -1,73 +0,0 @@
-import numpy as np
-from typing import Callable
-
-from neural_network.neural_network import NeuralNetwork
-
-
-def fp(
-    X_train: np.array,
-    y_train: np.array,
-    func: Callable,
-    w1: np.array,
-    w2: np.array,
-    b1: np.array,
-    b2: np.array,
-):
-    n1 = compute_node(arr=X_train, w=w1, b=b1, func=func)
-    y_hat = compute_node(arr=n1, w=w2, b=b2, func=func)
-    return y_hat, n1, (y_hat-y_train)
-
-
-def bp(
-    X_train: np.array,
-    y_train: np.array,
-    wb: dict,
-    args: dict,
-) -> NeuralNetwork:
-    args.update(wb)
-    model = NeuralNetwork.from_dict(args)
-    loss_history = []
-    for _ in range(model.epochs):
-        # forward prop
-        y_hat, node1, error = fp(
-            X_train=X_train,
-            y_train=y_train,
-            func=model.activation_func,
-            w1=model.w1, w2=model.w2, b1=model.b1, b2=model.b2,
-        )
-        mean_squared_error = mse(y_train, y_hat)
-        loss_history.append(mean_squared_error)
-
-        # backprop
-        dw1 = np.dot(
-            X_train.T,
-            np.dot(error * model.func_prime(y_hat), model.w2.T) *
-            model.func_prime(node1),
-        )
-        dw2 = np.dot(
-            node1.T,
-            error * model.func_prime(y_hat),
-        )
-        db2 = np.sum(error * model.func_prime(y_hat), axis=0)
-        db1 = np.sum(
-            np.dot(error * model.func_prime(y_hat), model.w2.T) * model.func_prime(node1), axis=0,
-        )
-
-        # update weights & biases using gradient descent.
-        # this is -= and not += because if the gradient descent
-        # is positive, we want to go down.
-        model.w1 -= (model.learning_rate * dw1)
-        model.w2 -= (model.learning_rate * dw2)
-        model.b1 -= (model.learning_rate * db1)
-        model.b2 -= (model.learning_rate * db2)
-
-    model.set_loss_hist(loss_hist=loss_history)
-    return model
-
-
-def compute_node(arr, w, b, func):
-    return func(np.dot(arr, w) + b)
-
-
-def mse(y: np.array, y_hat: np.array):
-    return np.mean((y - y_hat) ** 2)
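In matrix form, the gradients that the deleted training loop computes for this one-hidden-layer network, with error $e = \hat{y} - y$ and the derivative $f'$ applied to the layer outputs exactly as in the code, are:

$$
\delta_2 = e \odot f'(\hat{y}), \qquad
\nabla_{w_2} = n_1^{\top}\delta_2, \qquad
\nabla_{b_2} = \sum_i \delta_{2,i}
$$

$$
\delta_1 = (\delta_2\, w_2^{\top}) \odot f'(n_1), \qquad
\nabla_{w_1} = X^{\top}\delta_1, \qquad
\nabla_{b_1} = \sum_i \delta_{1,i}
$$

Each parameter is then updated as $w \leftarrow w - \eta\,\nabla_w$ with learning rate $\eta$.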
neural_network/main.py
DELETED
@@ -1,51 +0,0 @@
-from sklearn.model_selection import train_test_split
-import numpy as np
-
-from neural_network.opts import activation
-from neural_network.backprop import bp
-
-
-def init(X: np.array, hidden_size: int):
-    """
-    returns a dictionary containing randomly initialized
-    weights and biases to start off the neural_network
-    """
-    return {
-        "w1": np.random.randn(X.shape[1], hidden_size),
-        "b1": np.zeros((1, hidden_size)),
-        "w2": np.random.randn(hidden_size, 3),  # Output layer has 3 neurons
-        "b2": np.zeros((1, 3)),  # Output layer has 3 neurons
-    }
-
-
-def main(
-    X: np.array,
-    y: np.array,
-    args,
-) -> None:
-    wb = init(X, args["hidden_size"])
-    act = activation[args["activation_func"]]
-    args["activation_func"] = act["main"]
-    args["func_prime"] = act["prime"]
-    X_train, X_test, y_train, y_test = train_test_split(
-        X,
-        y,
-        test_size=0.2,
-        random_state=8675309,
-    )
-
-    model = bp(
-        X_train=X_train,
-        y_train=y_train,
-        wb=wb,
-        args=args,
-    )
-
-    # evaluate the model and return final results
-    model.eval(
-        X_test=X_test,
-        y_test=y_test,
-    )
-
-    model.plot()
-    return model.to_dict()
neural_network/neural_network.py
DELETED
@@ -1,86 +0,0 @@
-from dataclasses import dataclass, field
-from matplotlib import pyplot as plt
-import matplotlib
-import seaborn as sns
-from typing import Callable
-import numpy as np
-import base64
-import io
-
-sns.set()
-matplotlib.use("Agg")
-
-
-@dataclass
-class NeuralNetwork:
-    epochs: int
-    learning_rate: float
-    activation_func: Callable
-    func_prime: Callable
-    hidden_size: int
-    w1: np.array
-    w2: np.array
-    b1: np.array
-    b2: np.array
-
-    mse: float = 0
-    loss_history: list = field(
-        default_factory=lambda: [],
-    )
-    plt_data = None
-
-    def predict(self, x: np.array) -> np.array:
-        n1 = self.compute_node(x, self.w1, self.b1, self.activation_func)
-        return self.compute_node(n1, self.w2, self.b2, self.activation_func)
-
-    def set_loss_hist(self, loss_hist: list) -> None:
-        self.loss_history = loss_hist
-
-    def eval(self, X_test, y_test) -> None:
-        self.mse = np.mean((self.predict(X_test) - y_test) ** 2)
-
-    def set_plot_data(self, plot_data):
-        self.plt_data = plot_data
-
-    def plot(self):
-        sns.set()
-        fig, ax = plt.subplots()
-        sns.lineplot(
-            x=np.arange(len(self.loss_history)),
-            y=self.loss_history,
-            ax=ax,
-        )
-        plt.ylabel("Loss")
-        plt.xlabel("Epoch")
-        plt.title("Loss / Epoch")
-
-        buffer = io.BytesIO()
-        plt.savefig(buffer, format='png')
-        buffer.seek(0)
-        plt_data = buffer.read()
-        plt_data_encoded = base64.b64encode(plt_data).decode('utf-8')
-        self.set_plot_data(plt_data_encoded)
-        plt.close()
-
-    @staticmethod
-    def compute_node(arr, w, b, func) -> np.array:
-        return func(np.dot(arr, w) + b)
-
-    @classmethod
-    def from_dict(cls, dct):
-        return cls(**dct)
-
-    def to_dict(self) -> dict:
-        return {
-            # "w1": self.w1.tolist(),
-            # "w2": self.w2.tolist(),
-            # "b1": self.b1.tolist(),
-            # "b2": self.b2.tolist(),
-            "epochs": self.epochs,
-            "learning_rate": self.learning_rate,
-            "activation_func": self.activation_func.__name__,
-            "func_prime": self.func_prime.__name__,
-            "hidden_size": self.hidden_size,
-            "mse": self.mse,
-            "plt_data": self.plt_data,
-        }
neural_network/opts.py
DELETED
@@ -1,18 +0,0 @@
-from neural_network.activation import *
-
-activation = {
-    "relu": {
-        "main": relu,
-        "prime": relu_prime,
-    },
-
-    "sigmoid": {
-        "main": sigmoid,
-        "prime": sigmoid_prime,
-    },
-
-    "tanh": {
-        "main": tanh,
-        "prime": tanh_prime,
-    },
-}
opts.py
DELETED
@@ -1,10 +0,0 @@
-from neural_network.main import main as nn
-from cluster.main import main as clust
-
-options = {
-    "neural-network": nn,
-    "kmeans-clustering": clust,
-    "kmedoid-clustering": clust,
-    "heirarchical-clustering": clust,
-    "dbscan-clustering": clust,
-}
requirements.txt
DELETED
@@ -1,8 +0,0 @@
-Flask==2.2.3
-Flask_Cors==3.0.10
-matplotlib==3.5.3
-numpy==1.21.6
-requests==2.28.2
-scikit_learn==1.0.2
-seaborn==0.12.2
-gunicorn==20.1.0