Christopher Capobianco committed
Commit 80fa379
1 Parent(s): 4438759

Add Urban Planning project

.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 models/music_model.keras filter=lfs diff=lfs merge=lfs -text
+assets/*.html filter=lfs diff=lfs merge=lfs -text
Home.py CHANGED
@@ -15,6 +15,7 @@ mv = Image.open("assets/movie.jpg")
 sm = Image.open("assets/stock-market.png")
 mu = Image.open("assets/music.jpg")
 llm = Image.open("assets/llm.png")
+ear = Image.open("assets/earthquake.png")

 with st.container():
     text_column, image_column = st.columns((3,1))
@@ -80,4 +81,15 @@ with st.container():
         - The base model is Meta's Llama 3.1 (8B) Instruct
         """)
     with image_column:
-        st.image(llm)
+        st.image(llm)
+
+with st.container():
+    text_column, image_column = st.columns((3,1))
+    with text_column:
+        st.subheader("Urban Planner", divider="green")
+        st.markdown("""
+        - Display Earthquake and Population Density data
+        - Locate areas that need extra earthquake reinforcement
+        """)
+    with image_column:
+        st.image(ear)
app.py CHANGED
@@ -42,6 +42,7 @@ movie_recommendation = st.Page('projects/02_Movie_Recommendation.py', title='Mov
 stock_market = st.Page('projects/05_Stock_Market.py', title='Stock Market Forecast')
 generative_music = st.Page('projects/06_Generative_Music.py', title='Generative Music')
 llm_fine_tune = st.Page('projects/07_LLM_Fine_Tuned.py', title='Fine Tuned LLM')
+urban_planner = st.Page('projects/08_Urban_Planner.py', title='Urban Planner')

 pg = st.navigation(
     {
@@ -54,7 +55,8 @@ pg = st.navigation(
         # weather_classification,
         stock_market,
         generative_music,
-        llm_fine_tune
+        llm_fine_tune,
+        urban_planner
         ]
     }
 )
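For context, Streamlit's st.navigation takes a dict mapping section labels to lists of st.Page objects and returns the selected page, which app.py then has to run. A minimal sketch of that pattern follows; the "Projects" label and the trailing pg.run() call are assumptions about the rest of app.py, which is outside this hunk.

import streamlit as st

urban_planner = st.Page('projects/08_Urban_Planner.py', title='Urban Planner')

pg = st.navigation(
    {
        # "Projects" is a placeholder section label; the real name is outside this hunk
        "Projects": [
            urban_planner,
        ]
    }
)
pg.run()  # st.navigation only selects the page; run() actually renders it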
assets/earthquake.html ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0d347235129e56de7901d2a2117ef751f42c37a946e080cafec06c3d17e70f5
+size 28894986
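Because assets/*.html is now routed through Git LFS (per the .gitattributes change above), what is committed here is only this small pointer file; the ~28 MB interactive map has to be fetched with git lfs pull before the app can embed it. A minimal sketch of a runtime guard, using a hypothetical helper that is not part of this commit:

from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    # Un-fetched LFS files contain the pointer spec line instead of real HTML
    head = Path(path).read_text(encoding='utf-8', errors='ignore')[:64]
    return head.startswith('version https://git-lfs.github.com/spec/v1')

if is_lfs_pointer('assets/earthquake.html'):
    raise RuntimeError('assets/earthquake.html is an un-fetched LFS pointer; run `git lfs pull`.')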
assets/earthquake.png ADDED
projects/08_Urban_Planning.py ADDED
@@ -0,0 +1,21 @@
+import streamlit as st
+import streamlit.components.v1 as components
+
+st.header('Urban Planning', divider='green')
+
+# st.markdown("#### What is a LLM?")
+# st.markdown("LLM stands for Large Language Model, which are mathematical models trained to predict the next set of words based on the prompt provided to it.")
+# st.markdown("In this case we are using [Meta's LLama 3.1 (8B) Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) as our base model.")
+# st.markdown("#### What is a fine tuning?")
+# st.markdown("Fine tuning is the processes of tweaking the response of the LLM to particular subset of prompts. Most LLMs are trained on a large corpus of generic data, fine tuning just guides the LLM to a specific use case.")
+# st.markdown("In this case I have fine tuned the base model on [Microsoft's Orca Math Word Problems](https://huggingface.co/datasets/microsoft/orca-math-word-problems-200k), so the chatbot below is more of math assistant. Though it occasionally gets the wrong answer, it is willing to try again. I only fine tuned on 1000 math word problems, but I could try to train on the entire dataset in the future.")
+# st.markdown("#### What is quantization?")
+# st.markdown("Most LLM are quite large, often too large to fit into a computer's memory. So ML developers employ Graphic Processing Units (GPUs) with large amounts of memory to train or make use of such LLMs.")
+# st.markdown("However not everyone has access to such resources, so quantization is the process of decreasing the size of a model so that it can fit into memory (GPU or CPU). The process of quantization decreases the precision used to store the models parameters, at the cost of the model's accuracy.")
+# st.markdown("In this case after I fine tuned the LLama 3.1 (8B) base model, it was quantized to 4 bits which offers reasonable model accuracy at 25% the base model size.")
+# st.markdown("*This is based off the tutorial by Abid Ali Awan [Fine-Tuning Llama 3 and Using It Locally: A Step-by-Step Guide](https://www.datacamp.com/tutorial/llama3-fine-tuning-locally)*")
+st.divider()
+
+earthquake_file = open("assets/earthquake.html", 'r', encoding='utf-8')
+earthquake_html = earthquake_file.read()
+components.html(earthquake_html)
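As a side note, the open() handle in the new page is never closed and components.html falls back to its small default frame height, which would likely clip a full-page map. A hedged sketch of an equivalent version using a context manager and an explicit height; the 600 px value and scrolling flag are assumptions, not something this commit sets:

import streamlit.components.v1 as components

# Read the exported map and close the file handle automatically
with open("assets/earthquake.html", "r", encoding="utf-8") as earthquake_file:
    earthquake_html = earthquake_file.read()

# Give the embedded map room to render; height/scrolling values are assumptions
components.html(earthquake_html, height=600, scrolling=True)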