Jensen-holm committed
Commit: 75481dd
Parent(s): 03f2b37

making it a python package on PyPI
Files changed:
- README.md +85 -3
- about_package.md +81 -0
- app.py → gradio_app.py +2 -2
- warning.md → gradio_warning.md +0 -0
- requirements.txt +1 -0
- setup.py +21 -0
README.md
CHANGED
@@ -5,15 +5,97 @@ colorFrom: yellow
 colorTo: blue
 sdk: gradio
 sdk_version: 4.26.0
-app_file: app.py
+app_file: gradio_app.py
 pinned: false
 license: mit
 ---
 
-
+
+# Numpy-Neuron
+
+A small, simple neural network framework built using only [numpy](https://numpy.org) and python (duh).
+Here is an example of how to use the package to train a classifier.
+
+```py
+from sklearn import datasets
+from sklearn.preprocessing import OneHotEncoder
+from sklearn.model_selection import train_test_split
+from sklearn.metrics import accuracy_score
+import numpy as np
+from nn import (
+    NN,
+    Relu,
+    Sigmoid,
+    CrossEntropyWithLogits,
+)
+
+
+RANDOM_SEED = 2
+
+
+def _preprocess_digits(
+    seed: int,
+) -> tuple[np.ndarray, ...]:
+    digits = datasets.load_digits(as_frame=False)
+    n_samples = len(digits.images)
+    data = digits.images.reshape((n_samples, -1))
+    y = OneHotEncoder().fit_transform(digits.target.reshape(-1, 1)).toarray()
+    X_train, X_test, y_train, y_test = train_test_split(
+        data,
+        y,
+        test_size=0.2,
+        random_state=seed,
+    )
+    return X_train, X_test, y_train, y_test
+
+
+def train_nn_classifier() -> None:
+    X_train, X_test, y_train, y_test = _preprocess_digits(seed=RANDOM_SEED)
+
+    nn_classifier = NN(
+        epochs=2_000,
+        hidden_size=16,
+        batch_size=1,
+        learning_rate=0.01,
+        loss_fn=CrossEntropyWithLogits(),
+        hidden_activation_fn=Relu(),
+        output_activation_fn=Sigmoid(),
+        input_size=64,  # 8x8 pixel grid images
+        output_size=10,  # digits 0-9
+        seed=2,
+    )
+
+    nn_classifier.train(
+        X_train=X_train,
+        y_train=y_train,
+    )
+
+    pred = nn_classifier.predict(X_test=X_test)
+
+    pred = np.argmax(pred, axis=1)
+    y_test = np.argmax(y_test, axis=1)
+
+    accuracy = accuracy_score(y_true=y_test, y_pred=pred)
+
+    print(f"accuracy on validation set: {accuracy:.4f}")
+
+
+if __name__ == "__main__":
+    train_nn_classifier()
+```
+
+
+## Roadmap
+
+**Optimizers**
+I would love to add the ability to modify the learning rate over each epoch so that
+the gradient descent algorithm does not get stuck in local minima as easily.
+
+
+## Gradio app demo development notes
 
 The remote added to this repo so that it runs on Hugging Face Spaces:
 `git remote add space git@hf.co:spaces/Jensen-holm/Numpy-Neuron`
 
 The command to force push to that space:
-`git push --force space main`
+`git push --force space main`
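The Roadmap item above sketches learning-rate scheduling. One common way to realize it is an exponential decay applied per epoch; a minimal sketch in plain numpy (the `exponential_decay` helper and the idea of passing a per-epoch schedule into `NN` are illustrative assumptions, not part of the package's current API):

```py
import numpy as np


def exponential_decay(lr0: float, decay: float):
    """Build a schedule mapping epoch -> learning rate (hypothetical hook)."""

    def schedule(epoch: int) -> float:
        return lr0 * np.exp(-decay * epoch)

    return schedule


# start at the README example's learning_rate=0.01 and shrink it
# smoothly over its 2_000 training epochs
lr_at = exponential_decay(lr0=0.01, decay=1e-3)
print(lr_at(0), lr_at(1_000), lr_at(2_000))  # 0.01, ~0.0037, ~0.0014
```

A decaying step size mostly smooths convergence; momentum-style optimizers (e.g. Adam) are the more usual tool for escaping poor local minima.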
about_package.md
ADDED
@@ -0,0 +1,81 @@
The new file's 81 lines duplicate the "# Numpy-Neuron" section added to README.md above (everything up to, but not including, the "## Gradio app demo development notes" heading); setup.py reads it as the PyPI long_description.
app.py → gradio_app.py
RENAMED
@@ -4,7 +4,7 @@ from sklearn.model_selection import train_test_split
 import numpy as np
 import gradio as gr
 
-import nn
+import nn
 from vis import (  # classification visualization functions
     show_digits,
     hits_and_misses,
@@ -84,7 +84,7 @@ def classification(
 if __name__ == "__main__":
 
     def _open_warning() -> str:
-        with open("warning.md", "r") as f:
+        with open("gradio_warning.md", "r") as f:
             return f.read()
 
     with gr.Blocks() as interface:
warning.md → gradio_warning.md
RENAMED
File without changes
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ matplotlib==3.8.4
 numpy==1.26.4
 plotly==5.21.0
 scikit_learn==1.4.2
+setuptools==69.5.1
 tqdm==4.66.2
setup.py
ADDED
@@ -0,0 +1,21 @@
+from setuptools import setup, find_packages
+
+setup(
+    name="numpy_neuron",
+    version="0.3",
+    author="Jensen Holm",
+    author_email="jensen.dev.01@gmail.com",
+    description="Simple, lightweight neural network framework built in numpy",
+    long_description=open("about_package.md").read(),
+    long_description_content_type="text/markdown",
+    url="https://github.com/Jensen-holm/Numpy-Neuron",
+    project_urls={"Bug Tracker": "https://github.com/Jensen-holm/Numpy-Neuron/issues"},
+    package_dir={"": "nn"},
+    packages=find_packages(where="nn"),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires=">=3.6",
+)
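One detail of this setup.py worth flagging: `package_dir={"": "nn"}` declares that importable packages live *inside* the nn/ directory, so `find_packages(where="nn")` only discovers packages nested under it, and the installed distribution would not provide `import nn` itself. If nn/ is the package (which `from nn import NN` in the README suggests), the more conventional mapping would look like the sketch below — an assumption about the repository layout, not a confirmed fix:

```py
from setuptools import setup, find_packages

setup(
    name="numpy_neuron",
    version="0.3",
    # discover nn/ and its subpackages from the repo root so that
    # `import nn` / `from nn import NN` keep working after install
    packages=find_packages(include=["nn", "nn.*"]),
)
```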