Marcela Malaver
committed on
Commit
•
c71d9bd
0
Parent(s):
Upload demo about teeth segmentation
Browse files- .gitattributes +38 -0
- .gitignore +1 -0
- README.md +37 -0
- app.py +103 -0
- requirements.txt +78 -0
- requirements_basic.txt +7 -0
.gitattributes
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
teeth_1.png filter=lfs diff=lfs merge=lfs -text
|
37 |
+
teeth_2.png filter=lfs diff=lfs merge=lfs -text
|
38 |
+
teeth_3.png filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
env/*
|
README.md
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Streamlit Teeth Segmentation
|
3 |
+
emoji: 🌖
|
4 |
+
colorFrom: purple
|
5 |
+
colorTo: red
|
6 |
+
sdk: streamlit
|
7 |
+
sdk_version: 1.30.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: mit
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
14 |
+
|
15 |
+
|
16 |
+
Steps to run locally:
|
17 |
+
```
|
18 |
+
python3 -m venv env # To create an env
|
19 |
+
source env/bin/activate # To activate the env
|
20 |
+
pip3 install -r requirements.txt # To download packages needed
|
21 |
+
streamlit run app.py # To run the app
|
22 |
+
deactivate # To leave the env
|
23 |
+
```
|
24 |
+
|
25 |
+
Some useful commands:
|
26 |
+
```
|
27 |
+
pip3 install streamlit # Only the first time
|
28 |
+
pip3 install torch # Only the first time
|
29 |
+
pip3 freeze > requirements.txt # To export the libraries to a file
|
30 |
+
```
|
31 |
+
|
32 |
+
If you have issues with git push:
|
33 |
+
```
|
34 |
+
git remote get-url origin # Copy this value and then change it according to the next line
|
35 |
+
git remote set-url origin https://<user_name>:<token>@huggingface.co/<repo_path> # ex. git remote set-url origin https://marcela9409:valid_token@huggingface.co/spaces/marcela9409/streamlit-teeth-segmentation
|
36 |
+
git push
|
37 |
+
```
|
app.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
from PIL import Image
import numpy as np
import cv2
from huggingface_hub import from_pretrained_keras

st.header("X-ray segmentation of teeth / Segmentación de dientes con rayos X")
st.subheader("Iteration to improve demo / Iteración para mejorar la demo")

st.markdown(
    """
Demo for Platzi class / Demo para la clase de Platzi
"""
)

## Select and load the segmentation model from the Hugging Face Hub.
## from_pretrained_keras downloads the repo and rebuilds the Keras model.
model_id = "SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net"
model = from_pretrained_keras(model_id)

## Let the user upload an image; returns None until a file is provided.
image_file = st.file_uploader("Upload your image here / Sube aquí tu imagen", type=["png", "jpg", "jpeg"])
|
22 |
+
|
23 |
+
## Si una imagen tiene más de un canal entonces se convierte a escala de grises (1 canal)
|
24 |
+
def convert_one_channel(img):
    """Return a single-channel (grayscale) version of *img*.

    Images in this app are loaded through PIL and converted with
    ``np.asarray``, so multi-channel input arrives in RGB order (or RGBA
    for PNGs with transparency). The previous code used
    ``cv2.COLOR_BGR2GRAY``, which swaps the red/blue luma weights for
    RGB-ordered arrays and raises on 4-channel RGBA input.

    Grayscale (2-D) input is returned unchanged.
    """
    if img.ndim > 2:
        # RGBA: convert and drop alpha in one step; RGB: plain RGB -> gray.
        code = cv2.COLOR_RGBA2GRAY if img.shape[2] == 4 else cv2.COLOR_RGB2GRAY
        img = cv2.cvtColor(img, code)
    return img
|
30 |
+
|
31 |
+
def convertir_rgb(img):
    """Expand a single-channel (grayscale) image to 3-channel RGB.

    Anything that already has more than one channel is returned as-is.
    """
    # Guard clause: only 2-D (grayscale) arrays need converting.
    if img.ndim != 2:
        return img
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
|
37 |
+
|
38 |
+
|
39 |
+
## Offer three bundled example images; clicking a button feeds that
## example to the model by reusing the image_file variable.
examples = ["teeth_1.png", "teeth_2.png", "teeth_3.png"]

## One column per example, each with a preview and a "run" button.
button_labels = [
    "Run example 1 / Corre ejemplo 1",
    "Run example 2 / Corre ejemplo 2",
    "Run example 3 / Corre ejemplo 3",
]
for column, example_path, label in zip(st.columns(3), examples, button_labels):
    with column:
        # Show the example image inside its column.
        st.image(Image.open(example_path), width=200)
        # If the button is pressed, this example becomes the model input.
        if st.button(label):
            image_file = example_path
|
64 |
+
|
65 |
+
## If we have an image to feed the model (uploaded or an example),
## preprocess it and run inference.
if image_file is not None:
    ## Load the image with PIL, show it, and convert it to a NumPy array.
    img = Image.open(image_file)
    st.image(img, width=850)
    img = np.asarray(img)

    ## Preprocess: grayscale, resize to the model's 512x512 input,
    ## scale to [0, 1], and add batch/channel axes -> (1, 512, 512, 1).
    img_cv = convert_one_channel(img)
    img_cv = cv2.resize(img_cv, (512, 512), interpolation=cv2.INTER_LANCZOS4)
    img_cv = np.float32(img_cv / 255)
    img_cv = np.reshape(img_cv, (1, 512, 512, 1))

    ## Run the model; take the first (and only) item of the batch.
    predicted = model.predict(img_cv)
    predicted = predicted[0]

    ## Resize the prediction back to the original image size and build a
    ## binary segmentation mask.
    predicted = cv2.resize(
        predicted, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LANCZOS4
    )
    mask = np.uint8(predicted * 255)
    ## Otsu picks the threshold automatically, so thresh=0 is a placeholder.
    _, mask = cv2.threshold(
        mask, thresh=0, maxval=255, type=cv2.THRESH_BINARY + cv2.THRESH_OTSU
    )
    ## Morphological open/close to drop speckles and fill small holes.
    kernel = np.ones((5, 5), dtype=np.float32)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    ## Outline each segmented region on top of the original image.
    ## (Hierarchy output is unused, so it is discarded.)
    cnts, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    ## Fix: draw on a 3-channel RGB image so the (255, 0, 0) contour color
    ## is actually red. The previous code drew on the 1-channel grayscale
    ## image, which discards the color; convertir_rgb was defined for this
    ## purpose but never used.
    output = cv2.drawContours(
        convertir_rgb(convert_one_channel(img.astype(np.uint8))),
        cnts, -1, (255, 0, 0), 3,
    )

    ## If we successfully got a result, show it in the interface.
    if output is not None:
        st.subheader("Segmentation / Segmentación:")
        st.write(output.shape)  # NOTE(review): debug print of the result shape
        st.image(output, width=850)

st.markdown("Thanks for using our demo! / Gracias por usar esta demo")
|
requirements.txt
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
absl-py==2.1.0
|
2 |
+
altair==5.2.0
|
3 |
+
astunparse==1.6.3
|
4 |
+
attrs==23.2.0
|
5 |
+
blinker==1.7.0
|
6 |
+
cachetools==5.3.2
|
7 |
+
certifi==2023.11.17
|
8 |
+
charset-normalizer==3.3.2
|
9 |
+
click==8.1.7
|
10 |
+
filelock==3.13.1
|
11 |
+
flatbuffers==23.5.26
|
12 |
+
fsspec==2023.12.2
|
13 |
+
gast==0.5.4
|
14 |
+
gitdb==4.0.11
|
15 |
+
GitPython==3.1.41
|
16 |
+
google-auth==2.27.0
|
17 |
+
google-auth-oauthlib==1.2.0
|
18 |
+
google-pasta==0.2.0
|
19 |
+
grpcio==1.60.0
|
20 |
+
h5py==3.10.0
|
21 |
+
huggingface-hub==0.20.3
|
22 |
+
idna==3.6
|
23 |
+
importlib-metadata==7.0.1
|
24 |
+
Jinja2==3.1.3
|
25 |
+
jsonschema==4.21.1
|
26 |
+
jsonschema-specifications==2023.12.1
|
27 |
+
keras==2.15.0
|
28 |
+
libclang==16.0.6
|
29 |
+
Markdown==3.5.2
|
30 |
+
markdown-it-py==3.0.0
|
31 |
+
MarkupSafe==2.1.4
|
32 |
+
mdurl==0.1.2
|
33 |
+
ml-dtypes==0.2.0
|
34 |
+
numpy==1.26.3
|
35 |
+
oauthlib==3.2.2
|
36 |
+
opencv-python==4.9.0.80
|
37 |
+
opt-einsum==3.3.0
|
38 |
+
packaging==23.2
|
39 |
+
pandas==2.2.0
|
40 |
+
pillow==10.2.0
|
41 |
+
protobuf==4.23.4
|
42 |
+
pyarrow==15.0.0
|
43 |
+
pyasn1==0.5.1
|
44 |
+
pyasn1-modules==0.3.0
|
45 |
+
pydeck==0.8.1b0
|
46 |
+
Pygments==2.17.2
|
47 |
+
python-dateutil==2.8.2
|
48 |
+
pytz==2023.3.post1
|
49 |
+
PyYAML==6.0.1
|
50 |
+
referencing==0.32.1
|
51 |
+
requests==2.31.0
|
52 |
+
requests-oauthlib==1.3.1
|
53 |
+
rich==13.7.0
|
54 |
+
rpds-py==0.17.1
|
55 |
+
rsa==4.9
|
56 |
+
scipy==1.12.0
|
57 |
+
six==1.16.0
|
58 |
+
smmap==5.0.1
|
59 |
+
streamlit==1.30.0
|
60 |
+
tenacity==8.2.3
|
61 |
+
tensorboard==2.15.1
|
62 |
+
tensorboard-data-server==0.7.2
|
63 |
+
tensorflow==2.15.0
|
64 |
+
tensorflow-estimator==2.15.0
|
65 |
+
tensorflow-io-gcs-filesystem==0.35.0
|
66 |
+
termcolor==2.4.0
|
67 |
+
toml==0.10.2
|
68 |
+
toolz==0.12.1
|
69 |
+
tornado==6.4
|
70 |
+
tqdm==4.66.1
|
71 |
+
typing_extensions==4.9.0
|
72 |
+
tzdata==2023.4
|
73 |
+
tzlocal==5.2
|
74 |
+
urllib3==2.1.0
|
75 |
+
validators==0.22.0
|
76 |
+
Werkzeug==3.0.1
|
77 |
+
wrapt==1.14.1
|
78 |
+
zipp==3.17.0
|
requirements_basic.txt
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
numpy
|
2 |
+
Pillow
|
3 |
+
scipy
|
4 |
+
opencv-python
|
5 |
+
tensorflow
|
6 |
+
streamlit
|
7 |
+
huggingface_hub
|