kidcoconut
commited on
Commit
·
b831e6f
1
Parent(s):
06c8a33
copied demo files from project github task-5-deployment folder
Browse files- .dockerignore +17 -0
- .gitattributes +4 -0
- .gitignore +141 -0
- Dockerfile +60 -0
- README.md +1 -0
- __init__.py +0 -0
- app.py +26 -0
- bin/images/dbl.png +0 -0
- bin/images/logo_omdena_saudi.png +0 -0
- bin/models/__init__.py +0 -0
- bin/models/util_joinModel.sh +32 -0
- bin/models/util_splitModel.sh +31 -0
- config.toml +3 -0
- main.py +92 -0
- requirements.txt +68 -0
- routes/__init__.py +0 -0
- routes/api/__init__.py +0 -0
- routes/api/rte_api.py +79 -0
- routes/api/rte_tiles.py +198 -0
- routes/api/rte_wsi.py +56 -0
- routes/qa/__init__.py +0 -0
- routes/qa/rte_qa.py +17 -0
- templ/templ_results.html +4 -0
- templ/templ_showDataframe.html +15 -0
- uix/__init__.py +0 -0
- uix/lit_packages.py +31 -0
- uix/lit_sidebar.py +96 -0
- uix/pages/__init__.py +0 -0
- uix/pages/lit_about.py +22 -0
- uix/pages/lit_diagnosis.py +470 -0
- uix/pages/lit_home.py +32 -0
- uix/pages/lit_qaConfigCheck.py +88 -0
- util_dockerPreRun.sh +20 -0
- util_startLocal_streamlitFastApi.sh +21 -0
.dockerignore
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
#--- ignore select binary files/folders
|
3 |
+
bin/images/sample*
|
4 |
+
bin/models/*.pth
|
5 |
+
bin/models/*.zip
|
6 |
+
bin/testing
|
7 |
+
|
8 |
+
|
9 |
+
#--- ignore all local data files; preserve/recreate folder structure
|
10 |
+
data_host_mount
|
11 |
+
data/tiles
|
12 |
+
data/wsi
|
13 |
+
|
14 |
+
|
15 |
+
#--- ignore all files within the _ignore folder
|
16 |
+
_ignore
|
17 |
+
|
.gitattributes
CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*model_a* filter=lfs diff=lfs merge=lfs -text
|
37 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
38 |
+
data/demo_tiles/raw/*.tiff filter=lfs diff=lfs merge=lfs -text
|
39 |
+
bin/models/deeplabv3*vhflip30/model_a* filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- specific to task5-deploy
|
2 |
+
bin/images/sample*
|
3 |
+
bin/models/*/*.pth
|
4 |
+
bin/models/*.pth
|
5 |
+
bin/models/*.zip
|
6 |
+
bin/testing
|
7 |
+
data/tiles
|
8 |
+
data/wsi
|
9 |
+
data_host_mount
|
10 |
+
_ignore
|
11 |
+
|
12 |
+
|
13 |
+
# Byte-compiled / optimized / DLL files
|
14 |
+
__pycache__/
|
15 |
+
*.py[cod]
|
16 |
+
*$py.class
|
17 |
+
|
18 |
+
# C extensions
|
19 |
+
*.so
|
20 |
+
|
21 |
+
# Distribution / packaging
|
22 |
+
.Python
|
23 |
+
build/
|
24 |
+
develop-eggs/
|
25 |
+
dist/
|
26 |
+
downloads/
|
27 |
+
eggs/
|
28 |
+
.eggs/
|
29 |
+
lib/
|
30 |
+
lib64/
|
31 |
+
parts/
|
32 |
+
sdist/
|
33 |
+
var/
|
34 |
+
wheels/
|
35 |
+
pip-wheel-metadata/
|
36 |
+
share/python-wheels/
|
37 |
+
*.egg-info/
|
38 |
+
.installed.cfg
|
39 |
+
*.egg
|
40 |
+
MANIFEST
|
41 |
+
|
42 |
+
# PyInstaller
|
43 |
+
# Usually these files are written by a python script from a template
|
44 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
45 |
+
*.manifest
|
46 |
+
*.spec
|
47 |
+
|
48 |
+
# Installer logs
|
49 |
+
pip-log.txt
|
50 |
+
pip-delete-this-directory.txt
|
51 |
+
|
52 |
+
# Unit test / coverage reports
|
53 |
+
htmlcov/
|
54 |
+
.tox/
|
55 |
+
.nox/
|
56 |
+
.coverage
|
57 |
+
.coverage.*
|
58 |
+
.cache
|
59 |
+
nosetests.xml
|
60 |
+
coverage.xml
|
61 |
+
*.cover
|
62 |
+
*.py,cover
|
63 |
+
.hypothesis/
|
64 |
+
.pytest_cache/
|
65 |
+
|
66 |
+
# Translations
|
67 |
+
*.mo
|
68 |
+
*.pot
|
69 |
+
|
70 |
+
# Django stuff:
|
71 |
+
*.log
|
72 |
+
local_settings.py
|
73 |
+
db.sqlite3
|
74 |
+
db.sqlite3-journal
|
75 |
+
|
76 |
+
# Flask stuff:
|
77 |
+
instance/
|
78 |
+
.webassets-cache
|
79 |
+
|
80 |
+
# Scrapy stuff:
|
81 |
+
.scrapy
|
82 |
+
|
83 |
+
# Sphinx documentation
|
84 |
+
docs/_build/
|
85 |
+
|
86 |
+
# PyBuilder
|
87 |
+
target/
|
88 |
+
|
89 |
+
# Jupyter Notebook
|
90 |
+
.ipynb_checkpoints
|
91 |
+
|
92 |
+
# IPython
|
93 |
+
profile_default/
|
94 |
+
ipython_config.py
|
95 |
+
|
96 |
+
# pyenv
|
97 |
+
.python-version
|
98 |
+
|
99 |
+
# pipenv
|
100 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
101 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
102 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
103 |
+
# install all needed dependencies.
|
104 |
+
#Pipfile.lock
|
105 |
+
|
106 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
107 |
+
__pypackages__/
|
108 |
+
|
109 |
+
# Celery stuff
|
110 |
+
celerybeat-schedule
|
111 |
+
celerybeat.pid
|
112 |
+
|
113 |
+
# SageMath parsed files
|
114 |
+
*.sage.py
|
115 |
+
|
116 |
+
# Environments
|
117 |
+
.env
|
118 |
+
.venv
|
119 |
+
env/
|
120 |
+
venv/
|
121 |
+
ENV/
|
122 |
+
env.bak/
|
123 |
+
venv.bak/
|
124 |
+
|
125 |
+
# Spyder project settings
|
126 |
+
.spyderproject
|
127 |
+
.spyproject
|
128 |
+
|
129 |
+
# Rope project settings
|
130 |
+
.ropeproject
|
131 |
+
|
132 |
+
# mkdocs documentation
|
133 |
+
/site
|
134 |
+
|
135 |
+
# mypy
|
136 |
+
.mypy_cache/
|
137 |
+
.dmypy.json
|
138 |
+
dmypy.json
|
139 |
+
|
140 |
+
# Pyre type checker
|
141 |
+
.pyre/
|
Dockerfile
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- PREREQS:
|
2 |
+
# - create a local folder dedicated to WSI image mgmt: (docker pwd)/data
|
3 |
+
# - populate the folder with raw data, wsi and tiles
|
4 |
+
# - docker run --name <name> -v <local folder>
|
5 |
+
|
6 |
+
#--- utilize a light linux distro for python apps
|
7 |
+
FROM python:3.10.9-slim-bullseye
|
8 |
+
|
9 |
+
#--- copy only the requirements.txt file
|
10 |
+
#--- set docker image working directory to /app
|
11 |
+
#--- Not: this is reorg'd in an attempt to reduce the rebuilding of layers
|
12 |
+
COPY ./requirements.txt /app/requirements.txt
|
13 |
+
|
14 |
+
#--- set docker image working directory to /app
|
15 |
+
WORKDIR /app
|
16 |
+
|
17 |
+
#--- install all lib dependencies into the image
|
18 |
+
RUN pip3 install -r ./requirements.txt
|
19 |
+
RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 -y
|
20 |
+
|
21 |
+
#--- copy all files from the local pwd to the docker image /app folder
|
22 |
+
#--- .dockerignore: ensure no local data folders or files (images) are copied into the docker image/container
|
23 |
+
COPY . /app
|
24 |
+
|
25 |
+
#--- for streamlit; external 49400; internal 39400
|
26 |
+
# localExec: (from root folder) streamlit run app.py --server.port=39400 --server.maxUploadSize=2000
|
27 |
+
EXPOSE 49400
|
28 |
+
#CMD ["streamlit", "run", "app.py", "--server.port=39400", "--server.maxUploadSize=2000"]
|
29 |
+
|
30 |
+
|
31 |
+
#--- for fastapi; external 49500; internal 39500
|
32 |
+
# localExec: (from root folder) uvicorn main:app --reload --workers 1 --host 0.0.0.0 --port 39500
|
33 |
+
EXPOSE 49500
|
34 |
+
#CMD ["uvicorn", "main:app", "--reload", "--host=0.0.0.0", "--port=39500"]
|
35 |
+
|
36 |
+
#--- start streamlit and fastapi from a helper utility script
|
37 |
+
#CMD ./util_startLocal_streamlitFastApi.sh
|
38 |
+
CMD ./util_dockerPreRun.sh
|
39 |
+
|
40 |
+
|
41 |
+
#--- to build/rebuild the image; make sure you stop and remove the container if you are replacing/upgrading; or change the version tag# from 0.1
|
42 |
+
# docker build -t img_stm_omdenasaudi_hcc:0.1 .
|
43 |
+
|
44 |
+
#--- to tag the image prior to push to DockerHub; docker login and then register user/image:tag
|
45 |
+
#--- to push this image to DockerHub, example based on the repo: kidcoconut73/img_stm_omdenasaudi_hcc
|
46 |
+
# docker tag img_omdenasaudi_hcc:0.1 kidcoconut73/img_stm_omdenasaudi_hcc:demo
|
47 |
+
# docker tag img_omdenasaudi_hcc:0.1 kidcoconut73/img_stm_omdenasaudi_hcc:0.1
|
48 |
+
# docker push kidcoconut73/img_stm_omdenasaudi_hcc:demo
|
49 |
+
|
50 |
+
#--- to run the container from the image; specific port mapping (-p) vs any available port mapping (-P)
|
51 |
+
# docker run -p 49400:39400 -p 49500:39500 --name ctr_stmOmdenaSaudiHcc -v ./data:/app/data img_stm_omdenasaudi_hcc:0.1
|
52 |
+
# docker run -p 49400:39400 -p 49500:39500 --name ctr_stmOmdenaSaudiHcc img_stm_omdenasaudi_hcc:0.1
|
53 |
+
# docker run -P --name ctr_stmOmdenaHcc img_stm_omdenasaudi_hcc:0.1 #--- open all ports defined by Docker EXPOSE
|
54 |
+
|
55 |
+
#--- ISSUE: uvicorn bug does not allow ctl-C break of fastapi through terminal
|
56 |
+
#--- WORKAROUND: you have to run a docker or docker compose kill cmd; eg docker kill <ctr_name>
|
57 |
+
|
58 |
+
|
59 |
+
#--- Docker build log
|
60 |
+
# from python:3.10.9-slim-bullseye size: 4.21gb time: >yyys
|
README.md
CHANGED
@@ -6,6 +6,7 @@ colorTo: green
|
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
license: mit
|
|
|
9 |
---
|
10 |
|
11 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
6 |
sdk: docker
|
7 |
pinned: false
|
8 |
license: mit
|
9 |
+
app_port: 49400
|
10 |
---
|
11 |
|
12 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
__init__.py
ADDED
File without changes
|
app.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
toExecute: (from root app folder) ... streamlit run app.py
|
3 |
+
'''
|
4 |
+
import streamlit as st
|
5 |
+
#from uix import lit_sidebar as lit_sideBar
|
6 |
+
import uix.lit_sidebar as litSideBar
|
7 |
+
|
8 |
+
|
9 |
+
#--- streamlit: specify title and logo
|
10 |
+
st.set_page_config(
|
11 |
+
page_title='Omdena Saudi Arabia - Liver HCC Diagnosis with XAI',
|
12 |
+
#page_icon='https://cdn.freebiesupply.com/logos/thumbs/1x/nvidia-logo.png',
|
13 |
+
layout="wide")
|
14 |
+
st.header("Omdena Saudi Arabia - Liver HCC Diagnosis with XAI")
|
15 |
+
st.markdown('---')
|
16 |
+
|
17 |
+
|
18 |
+
#--- streamlit: add a sidebar
|
19 |
+
litSideBar.init()
|
20 |
+
|
21 |
+
|
22 |
+
#if __name__ == '__main__':
|
23 |
+
# st.run("main:app", host="0.0.0.0", port=49300, reload=True)
|
24 |
+
# streamlit run app.py --server.port 49400 --server.maxUploadSize 2000
|
25 |
+
|
26 |
+
#aryPkg[moduleNames.index(page)].run()
|
bin/images/dbl.png
ADDED
bin/images/logo_omdena_saudi.png
ADDED
bin/models/__init__.py
ADDED
File without changes
|
bin/models/util_joinModel.sh
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
|
3 |
+
<<blkHeader
|
4 |
+
Name: util_joinModel
|
5 |
+
Purpose: reconstitutes a split pyTorch binary model with weights, into a single binary file
|
6 |
+
Usage: ./util_joinModel.sh <source pattern match> <dest model file>
|
7 |
+
- the first arg has to be wrapped in single quotes to ensure that bash does not expand wildcards
|
8 |
+
Prereqs: a model folder within bin/models; containing a split pyTorch model.pth as 1 or more model_nn files
|
9 |
+
Todo: get the parent folder name and use this as the name for the model file
|
10 |
+
blkHeader
|
11 |
+
|
12 |
+
#--- dependencies
|
13 |
+
#none
|
14 |
+
|
15 |
+
|
16 |
+
#--- initialization
|
17 |
+
#--- $1: first arg; source pattern match; eg './bin/models/deeplabv3*vhflip30/model_a*'; Note that this is wildcarded so must be in quotes
|
18 |
+
#--- $n: last arg; dest model file; eg. ./bin/models/model.pth
|
19 |
+
strPth_patternMatch=$1
|
20 |
+
strPth_filMatch=( $strPth_patternMatch ) #--- expand the pattern match; get the first value of the pattern match
|
21 |
+
strPth_parentFld=$(dirname $strPth_filMatch) #--- get the parent dir of the first file match
|
22 |
+
strPth_mdlFile=${@: -1} #--- Note: this gets the last arg; otherwise the 2nd arg would be an iteration of the 1st arg wildcard
|
23 |
+
|
24 |
+
#echo "TRACE: strPth_patternMatch= $strPth_patternMatch"
|
25 |
+
#echo "TRACE: strPth_filMatch= $strPth_filMatch"
|
26 |
+
#echo "TRACE: strPth_parentFld= $strPth_parentFld"
|
27 |
+
#echo "TRACE: strPth_mdlFile= $strPth_mdlFile"
|
28 |
+
|
29 |
+
#--- reconstitute model
|
30 |
+
#--- Note: cat command does not work with single-quote literals; do not reapply single quotes
|
31 |
+
#echo "cat ${strPth_patternMatch} > ${strPth_mdlFile}"
|
32 |
+
cat ${strPth_patternMatch} > ${strPth_mdlFile}
|
bin/models/util_splitModel.sh
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/bin/bash
|
2 |
+
|
3 |
+
<<blkHeader
|
4 |
+
Name: util_splitModel
|
5 |
+
Purpose: convenience script to split a single pyTorch .pth model file with weights into smaller 10MB chunks in order to store within github
|
6 |
+
Usage: ./util_splitModel.sh <src model file> <dest folder>
|
7 |
+
- the first arg has to be wrapped in single quotes to ensure that bash does not expand wildcards
|
8 |
+
Prereqs: a pytorch model file
|
9 |
+
Todo: get the parent folder name and use this as the name for the model file
|
10 |
+
blkHeader
|
11 |
+
|
12 |
+
#--- dependencies
|
13 |
+
#none
|
14 |
+
|
15 |
+
|
16 |
+
#--- initialization
|
17 |
+
#--- $1: first arg; the source model file; eg ./bin/models/model.pth
|
18 |
+
#--- $n: last arg; dest model path; eg. ./test_model_folder
|
19 |
+
strPth_mdlFile=$1
|
20 |
+
strPth_mdlFolder=$2
|
21 |
+
strPrefix='/model_'
|
22 |
+
|
23 |
+
#echo "TRACE: strPth_mdlFile= $strPth_mdlFile"
|
24 |
+
echo "TRACE: strPth_mdlFolder= $strPth_mdlFolder"
|
25 |
+
|
26 |
+
#--- ensure the target dir exists
|
27 |
+
mkdir -p $strPth_mdlFolder
|
28 |
+
|
29 |
+
#--- split the model into smaller chunks
|
30 |
+
echo "split -b 10M $strPth_mdlFile $strPth_mdlFolder$strPrefix"
|
31 |
+
split -b 10M $strPth_mdlFile $strPth_mdlFolder$strPrefix
|
config.toml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
[server]
|
2 |
+
|
3 |
+
maxUploadSize = 2000 #--- increased from default 200MB to 2000MB
|
main.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''
|
2 |
+
purpose: fastAPI routing
|
3 |
+
'''
|
4 |
+
|
5 |
+
from fastapi import FastAPI
|
6 |
+
from fastapi.responses import HTMLResponse
|
7 |
+
from fastapi import APIRouter, Request, Response
|
8 |
+
from fastapi.templating import Jinja2Templates
|
9 |
+
import uvicorn
|
10 |
+
|
11 |
+
#--- import custom libraries
|
12 |
+
import lib.utils as libUtils
|
13 |
+
|
14 |
+
|
15 |
+
#--- imported route handlers
|
16 |
+
from routes.api.rte_api import rteApi
|
17 |
+
from routes.api.rte_wsi import rteWsi
|
18 |
+
from routes.api.rte_tiles import rteTiles
|
19 |
+
|
20 |
+
|
21 |
+
#--- fastAPI self doc descriptors
|
22 |
+
description = """
|
23 |
+
Omdena Saudi Arabia: Liver Cancer HCC Diagnosis with XAI
|
24 |
+
|
25 |
+
<insert purpose>
|
26 |
+
|
27 |
+
## key business benefit #1
|
28 |
+
## key business benefit #2
|
29 |
+
## key business benefit #3
|
30 |
+
|
31 |
+
You will be able to:
|
32 |
+
* key feature #1
|
33 |
+
* key feature #2
|
34 |
+
* key feature #3
|
35 |
+
"""
|
36 |
+
|
37 |
+
app = FastAPI(
|
38 |
+
title="App: Omdena Saudi Arabia - Liver Cancer HCC Diagnosis with XAI",
|
39 |
+
description=description,
|
40 |
+
version="0.0.1",
|
41 |
+
terms_of_service="http://example.com/terms/",
|
42 |
+
contact={
|
43 |
+
"name": "Iain McKone",
|
44 |
+
"email": "iain.mckone@gmail.com",
|
45 |
+
},
|
46 |
+
license_info={
|
47 |
+
"name": "Apache 2.0",
|
48 |
+
"url": "https://www.apache.org/licenses/LICENSE-2.0.html",
|
49 |
+
},
|
50 |
+
)
|
51 |
+
|
52 |
+
|
53 |
+
#--- configure route handlers
|
54 |
+
app.include_router(rteWsi, prefix="/api/wsi")
|
55 |
+
app.include_router(rteTiles, prefix="/api/tiles")
|
56 |
+
app.include_router(rteApi, prefix="/api")
|
57 |
+
|
58 |
+
#app.include_router(rteQa, prefix="/qa")
|
59 |
+
|
60 |
+
|
61 |
+
m_kstrPath_templ = libUtils.pth_templ
|
62 |
+
m_templRef = Jinja2Templates(directory=str(m_kstrPath_templ))
|
63 |
+
|
64 |
+
|
65 |
+
def get_jinja2Templ(request: Request, pdfResults, strParamTitle, lngNumRecords, blnIsTrain=False, blnIsSample=False):
|
66 |
+
lngNumRecords = min(lngNumRecords, libUtils.m_klngMaxRecords)
|
67 |
+
if (blnIsTrain): strParamTitle = strParamTitle + " - Training Data"
|
68 |
+
if (not blnIsTrain): strParamTitle = strParamTitle + " - Test Data"
|
69 |
+
if (blnIsSample): lngNumRecords = libUtils.m_klngSampleSize
|
70 |
+
strParamTitle = strParamTitle + " - max " + str(lngNumRecords) + " rows"
|
71 |
+
|
72 |
+
kstrTempl = 'templ_showDataframe.html'
|
73 |
+
jsonContext = {'request': request,
|
74 |
+
'paramTitle': strParamTitle,
|
75 |
+
'paramDataframe': pdfResults.sample(lngNumRecords).to_html(classes='table table-striped')
|
76 |
+
}
|
77 |
+
result = m_templRef.TemplateResponse(kstrTempl, jsonContext)
|
78 |
+
return result
|
79 |
+
|
80 |
+
|
81 |
+
#--- get main ui/ux entry point
|
82 |
+
@app.get('/')
|
83 |
+
def index():
|
84 |
+
return {
|
85 |
+
"message": "Landing page: Omdena Saudi Arabia - Liver HCC Diagnosis with XAI"
|
86 |
+
}
|
87 |
+
|
88 |
+
|
89 |
+
|
90 |
+
if __name__ == '__main__':
|
91 |
+
uvicorn.run("main:app", host="0.0.0.0", port=49300, reload=True)
|
92 |
+
#CMD ["uvicorn", "main:app", "--host=0.0.0.0", "--reload"]
|
requirements.txt
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- 20230530: commented out all secondary packages as they were causing the huggingfaceSpace to fail
|
2 |
+
|
3 |
+
#altair==4.2.2
|
4 |
+
#anyio==3.6.2
|
5 |
+
#attrs==23.1.0
|
6 |
+
#backports.zoneinfo==0.2.1
|
7 |
+
#blinker==1.6.2
|
8 |
+
#cachetools==5.3.0
|
9 |
+
#certifi==2023.5.7
|
10 |
+
#charset-normalizer==3.1.0
|
11 |
+
#click==8.1.3
|
12 |
+
#decorator==5.1.1
|
13 |
+
#entrypoints==0.4
|
14 |
+
fastapi==0.95.2
|
15 |
+
#gitdb==4.0.10
|
16 |
+
#git-lfs
|
17 |
+
#GitPython==3.1.31
|
18 |
+
grad-cam
|
19 |
+
#h11==0.14.0
|
20 |
+
#idna==3.4
|
21 |
+
#importlib-metadata==6.6.0
|
22 |
+
#importlib-resources==5.12.0
|
23 |
+
#ipython-genutils==0.2.0
|
24 |
+
Jinja2==3.1.2
|
25 |
+
joblib==1.2.0
|
26 |
+
jsonschema==4.17.3
|
27 |
+
#markdown-it-py==2.2.0
|
28 |
+
#MarkupSafe==2.1.2
|
29 |
+
#mdurl==0.1.2
|
30 |
+
numpy==1.24.3
|
31 |
+
#packaging==23.1
|
32 |
+
pandas==1.5.3
|
33 |
+
#Pillow==9.5.0
|
34 |
+
#pkgutil_resolve_name==1.3.10
|
35 |
+
plotly==5.14.1
|
36 |
+
#protobuf==3.20.3
|
37 |
+
#pyarrow==12.0.0
|
38 |
+
#pydantic==1.10.8
|
39 |
+
#pydeck==0.8.1b0
|
40 |
+
#Pygments==2.15.1
|
41 |
+
#Pympler==1.0.1
|
42 |
+
#pyrsistent==0.19.3
|
43 |
+
#python-dateutil==2.8.2
|
44 |
+
#pytz==2023.3
|
45 |
+
#PyYAML==6.0
|
46 |
+
#requests==2.31.0
|
47 |
+
#rich==13.3.5
|
48 |
+
scikit-learn==1.1.1
|
49 |
+
#scipy==1.10.1
|
50 |
+
#six==1.16.0
|
51 |
+
#smmap==5.0.0
|
52 |
+
#sniffio==1.3.0
|
53 |
+
#starlette==0.27.0
|
54 |
+
streamlit==1.24.0
|
55 |
+
#tenacity==8.2.2
|
56 |
+
#threadpoolctl==3.1.0
|
57 |
+
#toml==0.10.2
|
58 |
+
#toolz==0.12.0
|
59 |
+
torch
|
60 |
+
torchvision
|
61 |
+
#tornado==6.3.2
|
62 |
+
#typing_extensions==4.6.2
|
63 |
+
#tzlocal==5.0.1
|
64 |
+
#urllib3==2.0.2
|
65 |
+
uvicorn==0.22.0
|
66 |
+
#validators==0.20.0
|
67 |
+
#watchdog==3.0.0
|
68 |
+
#zipp==3.15.0
|
routes/__init__.py
ADDED
File without changes
|
routes/api/__init__.py
ADDED
File without changes
|
routes/api/rte_api.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter, Request, Response
|
2 |
+
from fastapi.responses import JSONResponse
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import json
|
6 |
+
|
7 |
+
#import lib.claims as libClaims
|
8 |
+
#from lib.models import mdl_utils, mdl_xgb
|
9 |
+
|
10 |
+
|
11 |
+
rteApi = APIRouter()
|
12 |
+
|
13 |
+
|
14 |
+
#---
|
15 |
+
@rteApi.get('/')
|
16 |
+
def api_entry():
|
17 |
+
return {
|
18 |
+
"message": "api routing - welcome to Omdena Saudi HCC api"
|
19 |
+
}
|
20 |
+
|
21 |
+
|
22 |
+
|
23 |
+
'''
|
24 |
+
#--- >>> SAMPLE CODE BELOW
|
25 |
+
#--- return json for claims data (merged)
|
26 |
+
#--- note: current is kaggle, but future could include from yyyymm filter
|
27 |
+
@rteApi.get('/claims', response_class = JSONResponse)
|
28 |
+
def api_getClaims(request: Request, response: Response):
|
29 |
+
pdfClaims = libClaims.load_claims()
|
30 |
+
jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4)
|
31 |
+
result = json.loads(jsonSample)
|
32 |
+
return result
|
33 |
+
|
34 |
+
|
35 |
+
#--- return json for featEng
|
36 |
+
@rteApi.get('/claims/doFeatEng/', response_class = JSONResponse)
|
37 |
+
def tst_claims_featEng():
|
38 |
+
pdfClaims = libClaims.load_claims()
|
39 |
+
pdfFeatEng = libClaims.do_featEng(pdfClaims)
|
40 |
+
jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4)
|
41 |
+
result = json.loads(jsonSample)
|
42 |
+
return result
|
43 |
+
|
44 |
+
|
45 |
+
@rteApi.get('/claims/doStdScaling/', response_class = JSONResponse)
|
46 |
+
def tst_claims_stdScaling():
|
47 |
+
pdfClaims = libClaims.load_claims()
|
48 |
+
pdfFeatEng = libClaims.do_featEng(pdfClaims)
|
49 |
+
pdfScaled = mdl_utils.doClaims_stdScaler_toPdf(pdfFeatEng)
|
50 |
+
|
51 |
+
jsonSample = pdfClaims.head(50).to_json(orient="records", indent=4)
|
52 |
+
result = json.loads(jsonSample)
|
53 |
+
return result
|
54 |
+
|
55 |
+
|
56 |
+
@rteApi.get('/claims/predict/superv', response_class = JSONResponse)
|
57 |
+
@rteApi.get('/claims/predict/xgb', response_class = JSONResponse)
|
58 |
+
def predict_xgb():
|
59 |
+
#--- load test data
|
60 |
+
pdfClaims = libClaims.load_claims()
|
61 |
+
pdfFeatEng = libClaims.do_featEng(pdfClaims)
|
62 |
+
|
63 |
+
npaScaled = mdl_utils.do_stdScaler(pdfFeatEng)
|
64 |
+
pdfScaled = mdl_utils.do_stdScaler_toPdf(npaScaled)
|
65 |
+
|
66 |
+
ndaPredict = mdl_xgb.predict(npaScaled)
|
67 |
+
pdfPredict = pd.DataFrame(ndaPredict)
|
68 |
+
|
69 |
+
#--- stitch the grouped data with the labels
|
70 |
+
pdfResults = pdfScaled.copy()
|
71 |
+
pdfResults.insert(0, "hasAnom?", pdfPredict[0])
|
72 |
+
|
73 |
+
#--- filter to only those rows that are flagged with an anomaly
|
74 |
+
pdfResults = pdfResults[pdfResults['hasAnom?'] > 0]
|
75 |
+
|
76 |
+
jsonSample = pdfResults.head(50).to_json(orient="records", indent=4)
|
77 |
+
result = json.loads(jsonSample)
|
78 |
+
return result
|
79 |
+
'''
|
routes/api/rte_tiles.py
ADDED
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter, Request, Response
|
2 |
+
from fastapi.responses import HTMLResponse
|
3 |
+
import numpy as np
|
4 |
+
import cv2
|
5 |
+
import os
|
6 |
+
|
7 |
+
|
8 |
+
import main as libMain
|
9 |
+
from lib import utils as libUtils
|
10 |
+
|
11 |
+
|
12 |
+
m_kstrFile = __file__
|
13 |
+
m_blnTraceOn = True
|
14 |
+
|
15 |
+
m_kstrPath_templ = libUtils.pth_templ
|
16 |
+
|
17 |
+
|
18 |
+
rteTiles = APIRouter()
|
19 |
+
|
20 |
+
|
21 |
+
#---
|
22 |
+
@rteTiles.get('/')
|
23 |
+
def api_tiles():
|
24 |
+
return {
|
25 |
+
"message": "tiles api endpoint - welcome to the endpoint for tile image processing"
|
26 |
+
}
|
27 |
+
|
28 |
+
|
29 |
+
#---
|
30 |
+
@rteTiles.get('/raw/upload')
|
31 |
+
def api_tilesRawUpload():
|
32 |
+
'''
|
33 |
+
process an array of uploaded raw Tiles (from external app path)
|
34 |
+
- cleanup all old raw images in /data/tiles/raw
|
35 |
+
- save uploads to /data/tiles/raw
|
36 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
37 |
+
- create array of tile class objs
|
38 |
+
- return(s) json
|
39 |
+
- ack tile/raw uploads with info/attribs
|
40 |
+
'''
|
41 |
+
return {
|
42 |
+
"message": "tilesRawUpload endpoint - file processing of raw tile images"
|
43 |
+
}
|
44 |
+
|
45 |
+
|
46 |
+
@rteTiles.get('/raw/norm')
|
47 |
+
def api_tilesRawNormalize(strPthTile):
|
48 |
+
'''
|
49 |
+
process an array of uploaded raw Tiles (from internal app path)
|
50 |
+
- cleanup all old norm images in /data/tiles/norm
|
51 |
+
- process tile normalization ops
|
52 |
+
- save norm tiles to /data/tiles/norm
|
53 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
54 |
+
- return(s) json
|
55 |
+
- ack tile/norms with info/attribs
|
56 |
+
'''
|
57 |
+
#--- get file attributes
|
58 |
+
strFilPath, strFilName = os.path.split(strPthTile)
|
59 |
+
strPthRaw = strPthTile
|
60 |
+
|
61 |
+
#--- load the tile as a binary object
|
62 |
+
with open(strPthRaw,"rb") as filRaw:
|
63 |
+
imgRaw = filRaw.read()
|
64 |
+
|
65 |
+
#--- Resize Tiles to 256x256
|
66 |
+
#--- Note: imgTile is a buffer object.
|
67 |
+
aryNp = np.frombuffer(imgRaw, np.uint8)
|
68 |
+
imgTemp = cv2.imdecode(aryNp, cv2.IMREAD_COLOR)
|
69 |
+
imgResized = cv2.resize(imgTemp, (256, 256))
|
70 |
+
|
71 |
+
#--- save the normalized file
|
72 |
+
imgNorm = imgResized
|
73 |
+
strPthNorm = "data/tiles/norm", strFilName
|
74 |
+
with open(os.path.join(strPthNorm),"wb") as filNorm:
|
75 |
+
filNorm.write(imgResized.buffer)
|
76 |
+
return strPthNorm
|
77 |
+
""" return {
|
78 |
+
"message": "tileRawNorm endpoint - normalization of raw tile images"
|
79 |
+
}
|
80 |
+
"""
|
81 |
+
|
82 |
+
@rteTiles.get('/norm/upload')
|
83 |
+
def api_tilesNormUpload():
|
84 |
+
'''
|
85 |
+
process an array of uploaded norm Tiles (from external app path)
|
86 |
+
- cleanup all old norm images in /data/tiles/norm
|
87 |
+
- save uploads to /data/tiles/norm
|
88 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
89 |
+
- create array of tile class objs
|
90 |
+
- return(s) json
|
91 |
+
- ack tile/norm uploads with info/attribs
|
92 |
+
'''
|
93 |
+
return {
|
94 |
+
"message": "tilesNormUpload endpoint - file processing of norm tile images"
|
95 |
+
}
|
96 |
+
|
97 |
+
|
98 |
+
@rteTiles.get('/norm/preprocess')
|
99 |
+
def api_tilesNormPreprocess():
|
100 |
+
'''
|
101 |
+
preprocess an array of uploaded norm Tiles (from internal app path)
|
102 |
+
- perform remaining pre-processing of tiles prior to model prediction
|
103 |
+
- cleanup all old preproc images in /data/tiles/preproc
|
104 |
+
- save preproc tiles to /data/tiles/preproc
|
105 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
106 |
+
- return(s) json
|
107 |
+
- ack tile/preproc with info/attribs
|
108 |
+
'''
|
109 |
+
return {
|
110 |
+
"message": "tileNormPreprocess endpoint - preprocessing of normalized tile images"
|
111 |
+
}
|
112 |
+
|
113 |
+
|
114 |
+
@rteTiles.get('/preproc/upload')
|
115 |
+
def api_tilesPreprocUpload():
|
116 |
+
'''
|
117 |
+
process an array of uploaded preprocessed Tiles (from external app path)
|
118 |
+
- cleanup all old preproc images in /data/tiles/preproc
|
119 |
+
- save uploads to /data/tiles/preproc
|
120 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
121 |
+
- create array of tile class objs
|
122 |
+
- return(s) json
|
123 |
+
- ack tile/preproc uploads with info/attribs
|
124 |
+
'''
|
125 |
+
return {
|
126 |
+
"message": "tilesPreprocUpload endpoint - manage upload of preprocessed tile images, in prep for modelling/prdictions"
|
127 |
+
}
|
128 |
+
|
129 |
+
|
130 |
+
@rteTiles.get('/preproc/augment')
|
131 |
+
def api_tilesPreprocAugment():
|
132 |
+
'''
|
133 |
+
process an array of uploaded preprocessed tiles (from internal app path)
|
134 |
+
- cleanup all old augmented tiles in /data/tiles/augm
|
135 |
+
- perform augments of tiles prior to model prediction (translation, rotation, transforms)
|
136 |
+
- save augmented tiles to /data/tiles/augm
|
137 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
138 |
+
- return(s) json
|
139 |
+
- ack tile/augm with info/attribs
|
140 |
+
'''
|
141 |
+
return {
|
142 |
+
"message": "tilePreprocAugment endpoint - augment tile images"
|
143 |
+
}
|
144 |
+
|
145 |
+
|
146 |
+
@rteTiles.get('/augm/upload')
|
147 |
+
def api_tilesAugmUpload():
|
148 |
+
'''
|
149 |
+
process an array of augmented tiles (from external app path)
|
150 |
+
- cleanup all old augm images in /data/tiles/augm
|
151 |
+
- save uploads to /data/tiles/augm
|
152 |
+
- create tile class obj; capture file path, size, zoomMagnif, etc
|
153 |
+
- create array of tile class objs
|
154 |
+
- return(s) json
|
155 |
+
- ack tile/augm uploads with info/attribs
|
156 |
+
'''
|
157 |
+
return {
|
158 |
+
"message": "tilesAugmUpload endpoint - manage upload of augmented tile images, in prep for modelling/predictions"
|
159 |
+
}
|
160 |
+
|
161 |
+
|
162 |
+
#---
|
163 |
+
@rteTiles.get('/raw/predict')
|
164 |
+
def api_tileRawPredict():
|
165 |
+
return {
|
166 |
+
"message": "tile_rawPredict api endpoint - welcome to the endpoint for tile predictions"
|
167 |
+
}
|
168 |
+
|
169 |
+
|
170 |
+
#---
|
171 |
+
@rteTiles.get('/norm/segment')
|
172 |
+
def api_tileNormPredict():
|
173 |
+
return {
|
174 |
+
"message": "tile_normPredict api endpoint - welcome to the endpoint for tile predictions"
|
175 |
+
}
|
176 |
+
|
177 |
+
#---
|
178 |
+
@rteTiles.get('/norm/predict')
|
179 |
+
def api_tileNormPredict():
|
180 |
+
return {
|
181 |
+
"message": "tile_normPredict api endpoint - welcome to the endpoint for tile predictions"
|
182 |
+
}
|
183 |
+
|
184 |
+
|
185 |
+
#---
|
186 |
+
@rteTiles.get('/preproc/predict')
|
187 |
+
def api_tilePreprocPredict():
|
188 |
+
return {
|
189 |
+
"message": "tile_preprocPredict api endpoint - welcome to the endpoint for tile predictions"
|
190 |
+
}
|
191 |
+
|
192 |
+
|
193 |
+
#---
|
194 |
+
@rteTiles.get('/augm/predict')
def api_tileAugmPredict():
    """Placeholder endpoint: prediction on an augmented tile (not yet implemented)."""
    strMsg = "tile_augmPredict api endpoint - welcome to the endpoint for tile predictions"
    return {"message": strMsg}
|
routes/api/rte_wsi.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter, Request, Response
|
2 |
+
from fastapi.responses import HTMLResponse
|
3 |
+
|
4 |
+
|
5 |
+
import main as libMain
|
6 |
+
from lib import utils as libUtils
|
7 |
+
|
8 |
+
|
9 |
+
#--- module identity / tracing switches
m_kstrFile = __file__        # this module's file path (presumably for trace/log output — confirm)
m_blnTraceOn = True          # enable module-level trace prints

m_kstrPath_templ = libUtils.pth_templ    # base folder for html templates


rteWsi = APIRouter()         # router exposing the wsi endpoints defined below
|
16 |
+
|
17 |
+
|
18 |
+
#---
|
19 |
+
@rteWsi.get('/')
def api_wsi():
    """Root of the wsi router: returns a simple welcome/liveness message."""
    strMsg = "wsi api endpoint - welcome to the endpoint for wsi image processing"
    return {"message": strMsg}
|
24 |
+
|
25 |
+
|
26 |
+
#---
|
27 |
+
@rteWsi.get('/upload')
def api_wsiUpload():
    """Placeholder endpoint: ingest one uploaded WSI image.

    Planned behaviour (not yet implemented):
    - purge stale WSI images from /data/wsi/raw
    - save the upload to /data/wsi/raw
    - build a wsi object (path, size, zoom magnification, ...)
    - respond with JSON acknowledging the upload with its attributes
    """
    strMsg = "wsiUpload endpoint - file processing of one uploaded wsi image"
    return {"message": strMsg}
|
40 |
+
|
41 |
+
|
42 |
+
#---
|
43 |
+
@rteWsi.get('/chunk')
def api_wsiChunk():
    """Placeholder endpoint: chunk one WSI image (internal app path) into tiles.

    Planned behaviour (not yet implemented):
    - build a wsi object (path, size, zoom magnification, ...)
    - run the tile chunking process, saving tiles to /data/tiles/raw
    - respond with JSON acknowledging the wsi and the tiles created
      (total count; names, paths, attribs/dimensions)
    """
    return {
        #--- fix: message previously said "wsiLoad"; this is the /chunk handler
        "message": "wsiChunk endpoint - for chunking of wsi image to one or more tiles"
    }
|
routes/qa/__init__.py
ADDED
File without changes
|
routes/qa/rte_qa.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import APIRouter
|
2 |
+
|
3 |
+
|
4 |
+
#--- module identity / tracing switches
m_kstrFile = __file__    # this module's file path (presumably for trace/log output — confirm)
m_blnTraceOn = True      # enable module-level trace prints


rteQa = APIRouter()      # router exposing the qa endpoints defined below
|
9 |
+
|
10 |
+
|
11 |
+
@rteQa.get('/')
@rteQa.get('/verif')
@rteQa.get('/valid')
def qa_entry():
    """Shared handler for '/', '/verif' and '/valid': returns a qa welcome message."""
    strMsg = "qa routing - welcome to Omdena Saudi HCC qa"
    return {"message": strMsg}
|
templ/templ_results.html
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
<!-- Minimal shell template: renders a pre-rendered HTML fragment passed in as `dataframe`;
     `| safe` keeps Jinja from escaping the markup. -->
<html>
<body>{{ dataframe | safe }}</body>
</html>
|
templ/templ_showDataframe.html
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
<!-- Bootstrap-styled page: shows a heading (`paramTitle`) above a pre-rendered
     dataframe HTML fragment (`paramDataframe`). -->
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Fourthbrain Capstone: Healthcare Anomalies</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.0.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-+0n0xVW2eSR5OomGNYDnhzAbDsOXxcvSN1TPprVMTNDbiYZCxYbOOl7+AMvyTG2x" crossorigin="anonymous">
</head>
<body>

<h2>{{ paramTitle }}:</h2>

<!-- Mark data as safe, otherwise it will be rendered as a string -->
{{ paramDataframe | safe }}
</body>
</html>
|
uix/__init__.py
ADDED
File without changes
|
uix/lit_packages.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import importlib
|
2 |
+
|
3 |
+
|
4 |
+
#--- return a list of streamlit packages/pages to render
|
5 |
+
def packages():
    """Return the list of streamlit page-module names to render (in order)."""
    return ['lit_continentData', 'lit_countryData']
|
12 |
+
|
13 |
+
|
14 |
+
|
15 |
+
def get_aryPkgDescr():
    """Import each page module and collect its display description.

    Returns:
        [aryDescr, aryPkgs]: parallel lists — one description and one imported
        module object per name reported by packages().

    Fixes vs. original: the description lookup now runs INSIDE the loop (the
    original appended only once, after the loop, so aryDescr and aryPkgs went
    out of sync for more than one module), and the bare `except:` is replaced
    by getattr() with the module name as fallback.
    """
    aryDescr = []
    aryPkgs = []

    for modname in packages():
        m = importlib.import_module('.' + modname, 'uix')
        aryPkgs.append(m)
        #--- use the module description attribute if it exists, else the module name
        aryDescr.append(getattr(m, 'description', modname))

    return [aryDescr, aryPkgs]
|
uix/lit_sidebar.py
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import importlib
|
3 |
+
from uix import lit_packages
|
4 |
+
|
5 |
+
from uix.pages import lit_home, lit_about, lit_diagnosis
|
6 |
+
from uix.pages import lit_qaConfigCheck
|
7 |
+
|
8 |
+
|
9 |
+
#--- alt define sidebar pages
|
10 |
+
#--- map of sidebar labels to page modules; dict order drives the radio order in init()
m_aryPages = {
    "Home": lit_home, #--- TODO: update
    "Diagnosis: One Tile": lit_diagnosis,
    #"QA: File Check": lit_qaConfigCheck,
    "About": lit_about
}


#--- define module-level vars
m_aryModNames = lit_packages.packages()    # page-module names (used by init_selectBox path)
m_aryDescr = []                            # display descriptions; filled by init_modDescrAry()
m_aryMods = []                             # imported page modules; filled by init_modDescrAry()
|
22 |
+
|
23 |
+
|
24 |
+
def init():
    """Render the sidebar (logo + page radio) and dispatch to the selected page."""
    #--- upper panel
    with st.sidebar:
        kstrUrl_image = "bin/images/logo_omdena_saudi.png"
        st.sidebar.image(kstrUrl_image, width=200)
        #st.sidebar.markdown('Omdena Saudi - Liver HCC Diagnosis with XAI')


    #--- init checkboxes
    strKey = st.sidebar.radio("", list(m_aryPages.keys()))    # label-less page selector
    pagSel = m_aryPages[strKey]
    writePage(pagSel)
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
def init_selectBox():
    """Alternate sidebar: selectbox-based page menu plus footer.

    NOTE(review): appears unused by init(); presumably a legacy entry point —
    confirm before removing.
    """
    #--- init module array of page names, and descr
    init_modDescrAry()

    # Display the sidebar with a menu of apps
    kstrMsg = """
    __Claims Anomaly Views__
    """
    with st.sidebar:
        st.markdown('---')
        st.markdown(kstrMsg)
        page = st.selectbox('Select:', m_aryModNames, format_func=fmt_modName)

    #--- display sidebar footer
    with st.sidebar:
        st.markdown('---')
        st.write('Developed by Chavarria, McKone, Sharma')
        st.write('Contact at iain.mckone@gmail.com')

    # Run the chosen app
    m_aryMods[m_aryModNames.index(page)].run()
|
60 |
+
|
61 |
+
|
62 |
+
|
63 |
+
def init_modDescrAry():
    """Populate the module-level m_aryMods / m_aryDescr parallel lists.

    Imports every module named in m_aryModNames from the uix package and
    records its display description (the module's `description` attribute
    when present, otherwise the module name).

    Fix vs. original: the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) is replaced by getattr() with a fallback.
    """
    #--- note: you need to specify global scope for fxns to rebind module-level variables
    global m_aryMods
    global m_aryDescr

    m_aryMods = []
    m_aryDescr = []
    for modName in m_aryModNames:
        modTemp = importlib.import_module('.' + modName, 'uix')
        m_aryMods.append(modTemp)

        #--- use the module's description attribute if present, else its name
        m_aryDescr.append(getattr(modTemp, 'description', modName))
|
81 |
+
|
82 |
+
|
83 |
+
|
84 |
+
#--- display the app descriptions instead of the module names in the selctbox
|
85 |
+
def fmt_modName(strName):
    """Selectbox format_func: map a module name to its display description."""
    global m_aryModNames
    global m_aryDescr
    idx = m_aryModNames.index(strName)
    return m_aryDescr[idx]
|
89 |
+
|
90 |
+
|
91 |
+
|
92 |
+
def writePage(uixFile):
    """Render the selected page module by invoking its run() entry point."""
    # _reload_module(page)
    uixFile.run()
|
uix/pages/__init__.py
ADDED
File without changes
|
uix/pages/lit_about.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- about page
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
description = "About"
|
5 |
+
def run():
|
6 |
+
|
7 |
+
print("\nINFO (lit_about.run) loading ", description, " page ...")
|
8 |
+
|
9 |
+
#---
|
10 |
+
#st.experimental_memo.clear() #--- try to clear cache each time this page is hit
|
11 |
+
#st.cache_data.clear()
|
12 |
+
|
13 |
+
st.markdown('### About')
|
14 |
+
st.markdown('### Omdena Saudi: Liver HCC Diagnosis with XAI')
|
15 |
+
st.markdown('#### Chapter Lead: Dr. Shai')
|
16 |
+
|
17 |
+
st.markdown(
|
18 |
+
"""
|
19 |
+
About page
|
20 |
+
""",
|
21 |
+
unsafe_allow_html=True,
|
22 |
+
)
|
uix/pages/lit_diagnosis.py
ADDED
@@ -0,0 +1,470 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- anomaly detection - supervised page
|
2 |
+
import streamlit as st
|
3 |
+
import pandas as pd
|
4 |
+
import plotly.express as px
|
5 |
+
import plotly.graph_objects as go
|
6 |
+
import numpy as np
|
7 |
+
|
8 |
+
import matplotlib.pyplot as plt
|
9 |
+
from PIL import Image
|
10 |
+
import torch
|
11 |
+
import torch.nn as nn
|
12 |
+
import torch.nn.functional as F
|
13 |
+
from torchvision.models.segmentation import deeplabv3_resnet50
|
14 |
+
from torchvision.transforms.functional import to_tensor
|
15 |
+
from pytorch_grad_cam import GradCAM
|
16 |
+
from pytorch_grad_cam.utils.image import show_cam_on_image
|
17 |
+
|
18 |
+
import lib.utils as libUtils
|
19 |
+
|
20 |
+
import sys
|
21 |
+
import os
|
22 |
+
|
23 |
+
description = "Diagnosis"
|
24 |
+
m_kblnTraceOn = True #--- enable/disable module level tracing
|
25 |
+
|
26 |
+
|
27 |
+
#--- model initializations
|
28 |
+
#data_batch_size = 3 #--- decrease the number of images loaded, processed if the notebook crashes due to limited RAM
|
29 |
+
#NUM_EPOCHS = 10 # 50
|
30 |
+
#BATCH_SIZE = data_batch_size
|
31 |
+
NUM_CLASSES = 3
|
32 |
+
|
33 |
+
# path to save model weights
|
34 |
+
#BESTMODEL_PATH = r"model_deeplabv3_r50_full_training_dataset_80-20_split_10epochs_no-norm_vhflip30.pth" #--- path to save model weights
|
35 |
+
BESTMODEL_PATH = r"model.pth"
|
36 |
+
MODEL_FULLPATH = 'bin/models/' + BESTMODEL_PATH
|
37 |
+
model_path = MODEL_FULLPATH
|
38 |
+
|
39 |
+
DEFAULT_DEVICE_TYPE = ('cuda' if torch.cuda.is_available() else 'cpu') #--- cuda if gpu; cpu if on Colab Free
|
40 |
+
DEFAULT_BACKBONE_MODEL = 'r50'
|
41 |
+
backbone_model_name = DEFAULT_BACKBONE_MODEL
|
42 |
+
|
43 |
+
|
44 |
+
|
45 |
+
def run():
    """Render the 'Single Tile Diagnosis' page: accept one uploaded tile and show predictions."""
    #--- note: in python, you need to specify global scope for fxns to access module-level variables
    #--- fix: was 'global m_kbln_TraceOn' (typo); the module-level flag is m_kblnTraceOn
    global m_kblnTraceOn
    print("\nINFO (litDiagnosis.run) loading ", description, " page ...")


    #--- page settings
    if (m_kblnTraceOn): print("TRACE1 (litDiagnosis.run): Initialize Page Settings ...")
    st.header("Single Tile Diagnosis")


    #--- provide file drag/drop capability
    m_blnDisableDragDrop = False
    if(not m_blnDisableDragDrop):
        #btnSave = st.button("Save")
        imgDropped = st.file_uploader("Upload a single Tile", type=["png", "jpg", "tif", "tiff", "img"])
        m_blnDisableDragDrop = (imgDropped is None)


    #if (True):
    try:

        #--- show:
        #if (m_kblnTraceOn): print("TRACE (litDiagnosis.run): load WSI ...")
        if (m_blnDisableDragDrop):
            #--- nothing uploaded yet; render nothing
            print("")
        else:
            #--- display uploaded file details
            if (m_kblnTraceOn): print("TRACE1 (litDiagnosis.run): Print upload file details ...")
            st.write(
                "FileName:", "&nbsp;&nbsp;&nbsp;", imgDropped.name, "\n",
                "FileType:", "&nbsp;&nbsp;&nbsp;", imgDropped.type, "\n"
            )

            #--- display diagnosis results ... format (vertical)
            #showDiagnosis_vert(imgDropped)
            showDiagnosis_horiz(imgDropped)

    except TypeError as e:
        print("ERROR (litDiagnosis.run_typeError1): ", e)

    except:
        e = sys.exc_info()
        print("ERROR (litDiagnosis.run_genError1): ", e)


    try:

        #--- display WSI (currently disabled)
        #showImg_wsi(img)
        #st.image("bin/images/sample_wsi.png", use_column_width=True)

        print("")

    except TypeError as e:
        print("ERROR (litDiagnosis.run_typeError2): ", e)

    except:
        e = sys.exc_info()
        print("ERROR (litDiagnosis.run_genError2): ", e)
|
106 |
+
|
107 |
+
|
108 |
+
def showImg_wsi(img):
    #--- stub: WSI display not implemented yet; prints an empty line only
    print("")
|
110 |
+
|
111 |
+
|
112 |
+
def readyModel_getPreds(imgDropped):
    """Persist the uploaded tile, assemble the XAI-wrapped model, and run all predictions.

    Returns:
        (raw tile path, xai prediction output, cam_img_bg, cam_img_wt, cam_img_vt)
        — the last three are per-class GradCAM overlay lists.
    """
    print("TRACE: save raw tile ...")
    strPth_tilRaw = save_tilRaw(imgDropped)

    #--- ready the model: base architecture -> trained weights -> output wrapper for XAI
    print("TRACE: ready base model ...")
    mdlBase = readyBaseModel()
    print("TRACE: ready model with weights ...")
    mdlWeights = readyModelWithWeights(mdlBase)
    print("TRACE: ready model with xai ...")
    mdlXai = readyModelWithXAI(mdlWeights)

    #--- get the XAI weighted prediction
    print("TRACE: get xai weighted pred ...")
    output_pred, tns_batch = predXai_tile(mdlXai, strPth_tilRaw)

    #--- get the GRADCAM predictions
    print("TRACE: get GRADCAM preds ...")
    cam_img_bg, cam_img_wt, cam_img_vt = predGradCam_tile(output_pred, mdlXai, tns_batch)

    print("TRACE: return readyModel_getPreds ...")
    return strPth_tilRaw, output_pred, cam_img_bg, cam_img_wt, cam_img_vt
|
134 |
+
|
135 |
+
|
136 |
+
def showDiagnosis_horiz(imgDropped):
    """Render the diagnosis as five side-by-side columns: raw tile, prediction, three GradCAM overlays."""
    #--- copy the uploaded file to data/tiles/raw
    st.write("#")

    #--- ready the model, get predictions
    print("TRACE2: ready model ...")
    strPth_tilRaw, xai_pred, cam_img_bg, cam_img_wt, cam_img_vt = readyModel_getPreds(imgDropped)

    #--- display the raw prediction: headers
    print("TRACE2: display raw preds, headers ...")
    colRaw, colPred, colGradBack, colGradWhole, colGradViable = st.columns(5)
    colRaw.write("Raw Tile")
    colPred.write("Prediction")
    colGradBack.write("GradCAM: Background")
    colGradWhole.write("GradCAM: Whole Tumor")
    colGradViable.write("GradCAM: Viable Tumor")

    #--- display the raw prediction: images (first element of each batch/list)
    colRaw, colPred, colGradBack, colGradWhole, colGradViable = st.columns(5)
    showCol_rawTil(colRaw, strPth_tilRaw)
    showCol_predTil(colPred, xai_pred[0], strPth_tilRaw)
    showCol_gradCamImg("imgGradCam_bg", colGradBack, cam_img_bg[0])
    showCol_gradCamImg("imgGradCam_wt", colGradWhole, cam_img_wt[0])
    showCol_gradCamImg("imgGradCam_vt", colGradViable, cam_img_vt[0])
|
161 |
+
|
162 |
+
|
163 |
+
def showCol_rawTil(colRaw, strPth_tilRaw):
    """Render the raw uploaded tile image into its streamlit column."""
    print("TRACE3: showCol_rawTil ...")
    colRaw.image(strPth_tilRaw, width=400, use_column_width=True)
|
166 |
+
|
167 |
+
|
168 |
+
def showCol_predTil(colPred, xai_pred, strPth_tilRaw):
    """Save the argmax class mask of one tile's prediction as an image and render it.

    xai_pred: per-class score tensor for a single tile — caller passes
    output[0], so argmax over dim 0 collapses the class channel into a
    per-pixel label mask rendered with a 3-color map.
    """
    kstrPth_tilePred = "data/tiles/pred/"    # output folder; assumed to exist — TODO confirm
    strFilName = os.path.basename(strPth_tilRaw)
    strFil_tilePred = kstrPth_tilePred + strFilName

    print("TRACE3: showCol_predTil2 ... ", strFil_tilePred)
    argmax_mask = torch.argmax(xai_pred, dim=0)    # per-pixel predicted class
    preds = argmax_mask.cpu().squeeze().numpy()

    cmap = plt.cm.get_cmap('tab10', 3) # Choose a colormap with 3 colors
    print("TRACE3: typeOf(preds) ...", type(preds))

    print("TRACE3: save pred image ...")
    plt.imsave(strFil_tilePred, preds, cmap=cmap, vmin=0, vmax=2)

    print("TRACE3: load image ...", strFil_tilePred)
    colPred.image(strFil_tilePred, width=400, use_column_width=True)
|
185 |
+
|
186 |
+
|
187 |
+
def showCol_gradCamImg(strImgContext, colGradCam, cam_img):
    """Convert one GradCAM overlay (numpy RGB array) to a PIL image and render it."""
    print("TRACE3: showCol_gradImg ... ", strImgContext)
    imgGradCam = Image.fromarray(cam_img)
    colGradCam.image(imgGradCam, width=400, use_column_width=True)
|
191 |
+
|
192 |
+
|
193 |
+
def showDiagnosis_vert(imgDropped):
    """Vertical layout variant of the diagnosis view.

    NOTE(review): currently shows the raw tile in every row — the prediction
    and GradCAM rows are placeholders (see lstImages below).
    """
    #--- copy the uploaded file to data/tiles/raw
    st.write("#")

    #--- ready the model, get predictions
    strPth_tilRaw, xai_pred, cam_img_bg, cam_img_wt, cam_img_vt = readyModel_getPreds(imgDropped)

    #--- display all predictions
    '''
    strPth_tilPred = save_tilPred(output_pred)
    strPth_tilGradBg = save_tilGradBg(cam_img_bg)
    strPth_tilGradWt = None
    strPth_tilGradVt = None
    '''

    #--- display the raw image
    lstDescr = ["Raw Tile", "Prediction", "GradCam: Background", "GradCam: Whole Tumor", "GradCam: Viable Tumor"]
    lstImages = [strPth_tilRaw, strPth_tilRaw, strPth_tilRaw, strPth_tilRaw, strPth_tilRaw]    # TODO: replace placeholders with pred/cam images

    #--- display the raw prediction
    for imgIdx in range(len(lstImages)):
        colDescr, colImage = st.columns([0.25, 0.75])
        colDescr.write(lstDescr[imgIdx])
        colImage.image(lstImages[imgIdx], width=400, use_column_width=True)
|
218 |
+
|
219 |
+
|
220 |
+
def save_tilRaw(imgDropped):
    """Persist the uploaded raw tile to data/tiles/raw/<name>; return the saved path."""
    kstrPth_tileRaw = "data/tiles/raw/"
    strFil_tileRaw = f"{kstrPth_tileRaw}{imgDropped.name}"
    with open(strFil_tileRaw, "wb") as filUpload:
        filUpload.write(imgDropped.getbuffer())
    print("TRACE: uploaded file saved to ", strFil_tileRaw)
    return strFil_tileRaw
|
228 |
+
|
229 |
+
|
230 |
+
def prepare_model(backbone_model="mbv3", num_classes=2):
|
231 |
+
|
232 |
+
# Initialize model with pre-trained weights.
|
233 |
+
weights = 'DEFAULT'
|
234 |
+
if backbone_model == "mbv3":
|
235 |
+
model = None
|
236 |
+
#model = deeplabv3_mobilenet_v3_large(weights=weights)
|
237 |
+
|
238 |
+
elif backbone_model == "r50":
|
239 |
+
model = deeplabv3_resnet50(weights=weights)
|
240 |
+
|
241 |
+
elif backbone_model == "r101":
|
242 |
+
model = None
|
243 |
+
#model = deeplabv3_resnet101(weights=weights)
|
244 |
+
|
245 |
+
else:
|
246 |
+
raise ValueError("Wrong backbone model passed. Must be one of 'mbv3', 'r50' and 'r101' ")
|
247 |
+
|
248 |
+
# Update the number of output channels for the output layer.
|
249 |
+
# This will remove the pre-trained weights for the last layer.
|
250 |
+
model.classifier[-1] = nn.Conv2d(model.classifier[-1].in_channels, num_classes, kernel_size=1)
|
251 |
+
model.aux_classifier[-1] = nn.Conv2d(model.aux_classifier[-1].in_channels, num_classes, kernel_size=1)
|
252 |
+
return model
|
253 |
+
|
254 |
+
|
255 |
+
# computes IoU or Dice index
|
256 |
+
def intermediate_metric_calculation(predictions, targets, use_dice=False,
                                    smooth=1e-6, dims=(2, 3)):
    """Compute the mean IoU (default) or Dice index over a batch.

    Both tensors are (B, C, H, W); `dims` selects the spatial axes reduced per
    (batch, class) pair, and `smooth` guards against division by zero.
    Returns a scalar tensor: the mean over batch and classes.
    """
    # |G ∩ P| per (batch, class)
    overlap = (predictions * targets).sum(dim=dims) + smooth
    # |G| + |P| per (batch, class)
    combined = predictions.sum(dim=dims) + targets.sum(dim=dims) + smooth

    if use_dice:
        per_class = (2.0 * overlap) / combined
    else:
        # IoU: union = |G| + |P| - |G ∩ P|
        per_class = overlap / (combined - overlap + smooth)

    return per_class.mean()
|
282 |
+
|
283 |
+
|
284 |
+
def convert_2_onehot(matrix, num_classes=3):
    """One-hot encode (B, C, H, W) scores along the channel dimension.

    Takes the per-pixel argmax over channels and re-expands it to a one-hot
    tensor of the same (B, num_classes, H, W) layout.
    """
    class_idx = torch.argmax(matrix.permute(0, 2, 3, 1), dim=-1)
    encoded = torch.nn.functional.one_hot(class_idx, num_classes=num_classes)
    return encoded.permute(0, 3, 1, 2)
|
293 |
+
|
294 |
+
|
295 |
+
#--- I'm using just categorical cross_entropy for now
|
296 |
+
class Loss(nn.Module):
    """Categorical cross-entropy between unnormalized predictions and one-hot targets."""

    def __init__(self):
        super().__init__()

    def forward(self, predictions, targets):
        # predictions: (B, C, H, W) unnormalized scores
        # targets:     (B, C, H, W) one-hot encoded; collapse to class indices
        class_indices = torch.argmax(targets, dim=1)
        return F.cross_entropy(predictions, class_indices, reduction="mean")
|
307 |
+
|
308 |
+
|
309 |
+
class Metric(nn.Module):
    """IoU (or Dice when use_dice=True) between unnormalized predictions and one-hot targets."""

    def __init__(self, num_classes=3, smooth=1e-6, use_dice=False):
        super().__init__()
        self.num_classes = num_classes
        self.smooth = smooth
        self.use_dice = use_dice

    def forward(self, predictions, targets):
        # predictions --> (B, #C, H, W) unnormalized
        # targets --> (B, #C, H, W) one-hot encoded

        # Converting unnormalized predictions into one-hot encoded across channels.
        # Shape: (B, #C, H, W)
        predictions = convert_2_onehot(predictions, num_classes=self.num_classes) # one hot encoded
        metric = intermediate_metric_calculation(predictions, targets, use_dice=self.use_dice, smooth=self.smooth)

        # Compute the mean over the remaining axes (batch and classes). Shape: Scalar
        return metric
|
327 |
+
|
328 |
+
|
329 |
+
def get_default_device():
    """Return the preferred torch device: 'cuda' when available, otherwise 'cpu'."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
|
331 |
+
|
332 |
+
|
333 |
+
def readyBaseModel():
    """Construct the untrained DeepLabV3 base model with NUM_CLASSES output channels.

    Fix vs. original: the Metric/Loss instances and the Adam optimizer that
    were built here were never used or returned (training does not happen in
    this app), so that dead code is removed.
    """
    model = prepare_model(backbone_model=backbone_model_name, num_classes=NUM_CLASSES)
    return model
|
346 |
+
|
347 |
+
|
348 |
+
def readyModelWithWeights(mdlBase):
    """Load the trained weights from `model_path` into the base model and switch to eval mode.

    Weights are mapped to CPU so the app also runs on machines without a GPU.
    """
    print("TRACE: loading model with weights ... ", model_path)
    mdlBase.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
    model_with_weights = mdlBase
    model_with_weights.eval()
    return model_with_weights
|
354 |
+
|
355 |
+
|
356 |
+
class SegmentationModelOutputWrapper(torch.nn.Module):
    """Adapter that exposes only the 'out' tensor of a segmentation model's dict output.

    Torchvision's DeepLabV3 forward() returns a dict; this wrapper unwraps the
    'out' entry — presumably so GradCAM (which expects a plain tensor) can
    consume the model.
    """

    def __init__(self, model):
        super(SegmentationModelOutputWrapper, self).__init__()
        self.model = model

    def forward(self, x):
        result = self.model(x)
        return result["out"]
|
363 |
+
|
364 |
+
|
365 |
+
def readyModelWithXAI(mdlWeighted):
|
366 |
+
model_xai = SegmentationModelOutputWrapper(mdlWeighted)
|
367 |
+
|
368 |
+
model_xai.eval()
|
369 |
+
model_xai.to('cpu')
|
370 |
+
return model_xai
|
371 |
+
|
372 |
+
|
373 |
+
#--- demo: process a single file for validation/demo
|
374 |
+
def val_filToTensor(strPth_fil):
    """Load one image file as a float tensor batch of size 1.

    Pixel values are scaled to [0, 1]; the result is shaped (1, C, H, W).
    """
    img_fil = Image.open(strPth_fil)
    img_fil = img_fil.convert("RGB")
    img_fil = np.asarray(img_fil)/255
    return to_tensor(img_fil).unsqueeze(0)
|
379 |
+
|
380 |
+
|
381 |
+
#--- TODO demo: process a batch of files for validation/demo
|
382 |
+
def val_aryToTensor(pth_fil, ary_fils):
    """Convert a list of image file names (under folder `pth_fil`) to tensors.

    Fix vs. original: val_filToTensor takes a single path argument, but the
    original passed (pth_fil, str_filName) — a TypeError on every call. The
    folder and file name are now joined into one path first.
    """
    aryTensor = []
    for str_filName in ary_fils:
        aryTensor.append(val_filToTensor(os.path.join(pth_fil, str_filName)))
    return aryTensor
|
387 |
+
|
388 |
+
|
389 |
+
def predXai_tile(mdl_xai, strPth_tileRaw):
    """Run the wrapped model on a single tile file.

    Returns (raw prediction output, the batch-of-one input tensor) — the
    batch is also needed later by predGradCam_tile.
    """
    #--- run a prediction for a single
    print("TRACE: get tensor from single file ... ", strPth_tileRaw)
    val_tensorFil = val_filToTensor(strPth_tileRaw)
    val_tensorBatch = val_tensorFil    # batch of one image

    print("TRACE: get mdl_xai prediction ...")
    output = mdl_xai(val_tensorBatch.float().to('cpu'))

    print("TRACE: predXai_tile return ...")
    return output, val_tensorBatch
|
400 |
+
|
401 |
+
|
402 |
+
class SemanticSegmentationTarget:
    """GradCAM target: sum of one class's activation map under a binary mask.

    `category` selects the class channel of the model output; `mask` is a
    numpy array marking the pixels attributed to that class.
    """

    def __init__(self, category, mask):
        self.category = category
        mask_tensor = torch.from_numpy(mask)
        self.mask = mask_tensor.cuda() if torch.cuda.is_available() else mask_tensor

    def __call__(self, model_output):
        class_map = model_output[self.category, :, :]
        return (class_map * self.mask).sum()
|
411 |
+
|
412 |
+
|
413 |
+
def predGradCam_tile(output_xaiPred, mdl_xai, val_image_batch):
    """Generate per-class GradCAM overlays for every image in the batch.

    Args:
        output_xaiPred: model output scores, (B, C, H, W).
        mdl_xai: SegmentationModelOutputWrapper around the trained model.
        val_image_batch: the input tensor batch the prediction came from.

    Returns:
        (cam_img_bg, cam_img_wt, cam_img_vt): lists of RGB overlay arrays,
        one per batch image, for background / whole tumor / viable tumor.

    Refactor vs. original: the three near-identical GradCAM passes are
    collapsed into the _cam_overlay helper; behaviour is unchanged.
    """
    print("TRACE: predGradCam initialize ...")
    cam_img_bg = []
    cam_img_wt = []
    cam_img_vt = []

    sem_classes = ['__background__', 'whole_tumor', 'viable_tumor']
    sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}

    #--- per-pixel predicted class; NOTE(review): squeeze() drops the batch dim
    #--- when B == 1, so mask_float[i] below indexes a row in that case — confirm intent
    argmax_mask = torch.argmax(output_xaiPred, dim=1)
    seg_mask = argmax_mask.cpu().squeeze().numpy()

    bg_category = sem_class_to_idx["__background__"]
    bg_mask_float = np.float32(seg_mask == bg_category)
    wt_category = sem_class_to_idx["whole_tumor"]
    wt_mask_float = np.float32(seg_mask == wt_category)
    vt_category = sem_class_to_idx["viable_tumor"]
    vt_mask_float = np.float32(seg_mask == vt_category)

    target_layers = [mdl_xai.model.backbone.layer4]

    def _cam_overlay(category, mask_float_i, rgb_img, rgb_tensor):
        #--- one GradCAM pass for a single semantic class
        targets = [SemanticSegmentationTarget(category, mask_float_i)]
        with GradCAM(model=mdl_xai,
                     target_layers=target_layers,
                     use_cuda=torch.cuda.is_available()) as cam:
            grayscale_cam = cam(input_tensor=rgb_tensor, targets=targets)[0, :]
        return show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)

    for i in range(len(val_image_batch)):
        rgb_img = np.float32(val_image_batch[i].permute(1, 2, 0))
        rgb_tensor = val_image_batch[i].unsqueeze(0).float()

        print("TRACE: process the background ...")
        cam_img_bg.append(_cam_overlay(bg_category, bg_mask_float[i], rgb_img, rgb_tensor))

        print("TRACE: process whole tumors ...")
        cam_img_wt.append(_cam_overlay(wt_category, wt_mask_float[i], rgb_img, rgb_tensor))

        print("TRACE: process viable tumors ...")
        cam_img_vt.append(_cam_overlay(vt_category, vt_mask_float[i], rgb_img, rgb_tensor))

    return cam_img_bg, cam_img_wt, cam_img_vt
|
uix/pages/lit_home.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#--- about page
|
2 |
+
import streamlit as st
|
3 |
+
|
4 |
+
description = "Home"
|
5 |
+
def run():
|
6 |
+
|
7 |
+
print("\nINFO (lit_home.run) loading ", description, " page ...")
|
8 |
+
|
9 |
+
|
10 |
+
st.markdown('### Home')
|
11 |
+
st.markdown('### Omdena Saudi: Liver HCC Diagnosis with XAI')
|
12 |
+
st.markdown('#### Chapter Lead: Dr. Shai')
|
13 |
+
st.markdown('\
|
14 |
+
<background> \
|
15 |
+
')
|
16 |
+
|
17 |
+
st.markdown('\
|
18 |
+
<basis> \
|
19 |
+
')
|
20 |
+
|
21 |
+
st.markdown('\
|
22 |
+
<claim> \
|
23 |
+
')
|
24 |
+
|
25 |
+
st.markdown(
|
26 |
+
"""
|
27 |
+
|
28 |
+
Home page
|
29 |
+
|
30 |
+
""",
|
31 |
+
unsafe_allow_html=True,
|
32 |
+
)
|
uix/pages/lit_qaConfigCheck.py
ADDED
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#--- QA: configuration check page
import os
import sys

import pandas as pd
import streamlit as st

import lib.utils as libUtils


description = "QA: Config Check"


def _listFolder(strPth, strLabel):
    """List the contents of a data folder and render it as a dataframe.

    Args:
        strPth:   path of the folder to check/list.
        strLabel: column header for the rendered dataframe.

    A missing folder is reported with a warning and rendered as an empty
    listing rather than crashing the page — surfacing configuration
    problems is the whole point of this check.
    """
    try:
        lstFiles = os.listdir(strPth)
    except FileNotFoundError:
        st.warning("Folder not found: " + strPth)
        lstFiles = []
    print("TRACE: ", lstFiles)
    st.dataframe(
        pd.DataFrame({strLabel: lstFiles,}),
        use_container_width=True
    )


def run():
    """Render the Configuration Check page.

    Verifies that the base data folders (raw WSI, raw tiles, raw demo
    tiles) exist and lists their contents so a deployment can be sanity
    checked at a glance.
    """
    print("\nINFO (lit_config.run) loading ", description, " page ...")

    #--- cache-clear calls left disabled intentionally
    #st.experimental_memo.clear()           #--- try to clear cache each time this page is hit
    #st.cache_data.clear()

    st.markdown('### Configuration Check')

    #--- check that base folders exist; one listing per expected data folder
    _listFolder(libUtils.pth_dtaWsi + "raw/", "Raw WSI")
    _listFolder(libUtils.pth_dtaTiles + "raw/", "Raw Tiles")
    _listFolder(libUtils.pth_dtaDemoTiles + "raw/", "Raw Demo Tiles")

    st.markdown('''
        <style>
        [data-testid="stMarkdownContainer"] ul{
            list-style-position: inside;
        }
        </style>
        ''', unsafe_allow_html=True)

    #--- fixed page footer (plain CSS/HTML injected via markdown)
    cssFooter = """
        <style>
        a:link,
        a:visited{
            color: blue;
            background-color: transparent;
            text-decoration: underline;
        }
        a:hover, a:active {
            color: red;
            background-color: transparent;
            text-decoration: underline;
        }
        .footer {
            position: fixed;
            left: 0; bottom: 0; width: 100%;
            background-color: white;
            color: black;
            text-align: center;
        }
        </style>
        <div class="footer">
        <p>Configuration Check Page</p>
        </div>
    """
    st.markdown(cssFooter, unsafe_allow_html=True)
util_dockerPreRun.sh
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash

#--- Note: this file is designed to run locally as well as within docker to prep the environment
#--- for volume initialization; ensure folders are in place; assume: we are in the /app folder
mkdir -p data/demo_tiles/raw
mkdir -p data/tiles/raw data/tiles/pred data/tiles/grad_bg data/tiles/grad_wt data/tiles/grad_vt
mkdir -p data/wsi/raw


#--- the binary model is stored as split files named mdl_nn; this is done to
#--- ensure that the model can be stored within gitHub (file-size limits);
#--- the split model is recreated on docker container startup using the cat
#--- command (see util_joinModel.sh)

#--- recreate single model file from its parts, stored within a specific model version folder
#    (glob is quoted deliberately so it expands inside the join script, not here)
./bin/models/util_joinModel.sh './bin/models/deeplabv3*vhflip30/model_a*' ./bin/models/model.pth

#--- run streamlit/fastapi
./util_startLocal_streamlitFastApi.sh
util_startLocal_streamlitFastApi.sh
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#!/bin/bash

#--- Note: this file is designed to run locally and within docker to prep the environment
#--- for volume initialization; ensure folders are in place; assume: we are in the /app folder
#--- (folder creation handled by util_dockerPreRun.sh; kept here for reference)
#mkdir -p data/demo_tiles/raw
#mkdir -p data/tiles/raw data/tiles/pred data/tiles/grad_bg data/tiles/grad_wt data/tiles/grad_vt
#mkdir -p data/wsi/raw

#--- for streamlit; external 49400; internal 39400
#--- launched in the background so fastapi can start alongside it
echo "INFO: starting streamlit ..."
streamlit run app.py --server.port=39400 --server.maxUploadSize=2000 &

#--- for fastapi; external 49500; internal 39500
#--- also backgrounded; both servers run concurrently in this shell
echo "INFO: starting fastapi ..."
uvicorn main:app --reload --workers 1 --host 0.0.0.0 --port 39500 &

#--- wait for any process to exit (wait -n returns on the FIRST child to finish,
#    so the container dies promptly if either server crashes)
wait -n

#--- Exit with status of process that exited first
exit $?