Christopher Capobianco committed
Commit 938985b
1 Parent(s): 27cd57b

Remove Work in Progress warnings for LLM project

Files changed (2)
  1. Home.py +0 -1
  2. projects/07_LLM_Fine_Tuned.py +0 -2
Home.py CHANGED
@@ -63,7 +63,6 @@ with st.container():
     text_column, image_column = st.columns((3,1))
     with text_column:
         st.subheader("Fine Tuned LLM", divider="green")
-        st.warning("**Work In Progress**")
         st.markdown("""
         - Fine tuned a LLM to act like math assistant
         - The base model is Meta's Llama 3.1 (8B) Instruct
 
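For reference, the removed warning sat inside the project card that Home.py builds with Streamlit's container/columns pattern. A minimal standalone sketch of that pattern follows; the image column's content and the asset path are assumptions for illustration, not part of this commit.

import streamlit as st

with st.container():
    text_column, image_column = st.columns((3, 1))
    with text_column:
        st.subheader("Fine Tuned LLM", divider="green")
        st.markdown("""
        - Fine tuned a LLM to act like math assistant
        - The base model is Meta's Llama 3.1 (8B) Instruct
        """)
    with image_column:
        st.image("assets/llm.png")  # hypothetical asset path, not from the diff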
projects/07_LLM_Fine_Tuned.py CHANGED
@@ -17,8 +17,6 @@ st.markdown("In this case after I fine tuned the LLama 3.1 (8B) base model, it w
 st.markdown("*This is based off the tutorial by Abid Ali Awan [Fine-Tuning Llama 3 and Using It Locally: A Step-by-Step Guide](https://www.datacamp.com/tutorial/llama3-fine-tuning-locally)*")
 st.divider()

-st.warning("**Work In Progress**")
-
 if "messages" not in st.session_state:
     st.session_state["messages"] = [{"role": "system", "content": "You are a helpful math assistant."}, {"role": "assistant", "content": "What math problem can I help you with today?"}]
 
 
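For reference, the lines kept by this hunk are the session-state seeding for the project's chat page. A minimal sketch of how that message list is typically rendered and extended in Streamlit follows; the replay loop and the chat_input handling are assumptions for illustration, not part of this commit.

import streamlit as st

# Seed the conversation once per session (the two messages from the diff above).
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "system", "content": "You are a helpful math assistant."},
        {"role": "assistant", "content": "What math problem can I help you with today?"},
    ]

# Replay prior turns; the system prompt stays out of the visible transcript.
for msg in st.session_state["messages"]:
    if msg["role"] != "system":
        st.chat_message(msg["role"]).write(msg["content"])

# Record the next user turn; generating the fine-tuned model's reply would follow here.
if prompt := st.chat_input("Ask a math question"):
    st.session_state["messages"].append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)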