Christopher Capobianco committed on
Commit
69d27e7
1 Parent(s): 0d996ad

Add project description

Browse files
app.py CHANGED
@@ -42,7 +42,7 @@ movie_recommendation = st.Page('projects/02_Movie_Recommendation.py', title='Mov
42
  stock_market = st.Page('projects/05_Stock_Market.py', title='Stock Market Forecast')
43
  generative_music = st.Page('projects/06_Generative_Music.py', title='Generative Music')
44
  llm_fine_tune = st.Page('projects/07_LLM_Fine_Tuned.py', title='Fine Tuned LLM')
45
- urban_safety_planner = st.Page('projects/08_Urban_Safety_Planner.py', title='Urban Planner')
46
 
47
  pg = st.navigation(
48
  {
 
42
  stock_market = st.Page('projects/05_Stock_Market.py', title='Stock Market Forecast')
43
  generative_music = st.Page('projects/06_Generative_Music.py', title='Generative Music')
44
  llm_fine_tune = st.Page('projects/07_LLM_Fine_Tuned.py', title='Fine Tuned LLM')
45
+ urban_safety_planner = st.Page('projects/08_Urban_Safety_Planner.py', title='Urban Safety Planner')
46
 
47
  pg = st.navigation(
48
  {
assets/earthquake.html CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d0d347235129e56de7901d2a2117ef751f42c37a946e080cafec06c3d17e70f5
3
- size 28894986
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:511bb5e66621bfaf5e2a35179281d6940843e4209a2be0432f28c77fe2f0b0e1
3
+ size 28894988
projects/08_Urban_Safety_Planner.py CHANGED
@@ -1,21 +1,24 @@
1
  import streamlit as st
2
  import streamlit.components.v1 as components
3
 
 
 
 
 
 
 
4
  st.header('Urban Planning', divider='green')
5
 
6
- # st.markdown("#### What is a LLM?")
7
- # st.markdown("LLM stands for Large Language Model, which are mathematical models trained to predict the next set of words based on the prompt provided to it.")
8
- # st.markdown("In this case we are using [Meta's LLama 3.1 (8B) Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) as our base model.")
9
- # st.markdown("#### What is a fine tuning?")
10
- # st.markdown("Fine tuning is the processes of tweaking the response of the LLM to particular subset of prompts. Most LLMs are trained on a large corpus of generic data, fine tuning just guides the LLM to a specific use case.")
11
- # st.markdown("In this case I have fine tuned the base model on [Microsoft's Orca Math Word Problems](https://huggingface.co/datasets/microsoft/orca-math-word-problems-200k), so the chatbot below is more of math assistant. Though it occasionally gets the wrong answer, it is willing to try again. I only fine tuned on 1000 math word problems, but I could try to train on the entire dataset in the future.")
12
- # st.markdown("#### What is quantization?")
13
- # st.markdown("Most LLM are quite large, often too large to fit into a computer's memory. So ML developers employ Graphic Processing Units (GPUs) with large amounts of memory to train or make use of such LLMs.")
14
- # st.markdown("However not everyone has access to such resources, so quantization is the process of decreasing the size of a model so that it can fit into memory (GPU or CPU). The process of quantization decreases the precision used to store the models parameters, at the cost of the model's accuracy.")
15
- # st.markdown("In this case after I fine tuned the LLama 3.1 (8B) base model, it was quantized to 4 bits which offers reasonable model accuracy at 25% the base model size.")
16
- # st.markdown("*This is based off the tutorial by Abid Ali Awan [Fine-Tuning Llama 3 and Using It Locally: A Step-by-Step Guide](https://www.datacamp.com/tutorial/llama3-fine-tuning-locally)*")
17
  st.divider()
18
 
19
- earthquake_file = open("assets/earthquake.html", 'r', encoding='utf-8')
20
- earthquake_html = earthquake_file.read()
21
  components.html(earthquake_html)
 
1
  import streamlit as st
2
  import streamlit.components.v1 as components
3
 
4
+ @st.cache_data
5
+ def get_map():
6
+ eq_file = open("assets/earthquake.html", 'r', encoding='utf-8')
7
+ eq_html = eq_file.read()
8
+ return eq_html
9
+
10
  st.header('Urban Planning', divider='green')
11
 
12
+ st.markdown("#### Where Do Earthquakes Occur?")
13
+ st.markdown("Earthquakes typically coincide with tectonic plate boundaries.")
14
+ st.markdown("#### Which Prefectures have the highest Population Density?")
15
+ st.markdown("The three highest prefectures by population density are: Tokyo, Kanagawa and Osaka")
16
+ st.markdown("#### Which Prefectures are Prone to High-Magnitude Earthquakes?")
17
+ st.markdown("Tokyo has the highest population density, and has experienced some earthquakes")
18
+ st.markdown("Osaka is relatively less populated, but has experienced stronger earthquakes than Tokyo")
19
+ st.markdown("Kanagawa's long coastline, as well as relatively high population density and proximity to strong earthquakes, put it at risk of tsunami")
20
+ st.markdown("*This is based off the [Geospatial Analysis](https://www.kaggle.com/learn/geospatial-analysis) course by Kaggle*")
 
 
21
  st.divider()
22
 
23
+ earthquake_html = get_map()
 
24
  components.html(earthquake_html)