bhulston committed
Commit 7704f84
1 Parent(s): 3887eee

Update app.py

Files changed (1)
  1. app.py +20 -4
app.py CHANGED
@@ -60,21 +60,37 @@ for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-prompt = st.chat_input("What kind of class are you looking for?")
-
+if prompt := st.chat_input("What kind of class are you looking for?"):
+    # Display user message in chat message container
+    with st.chat_message("user"):
+        st.markdown(prompt)
+    # Add user message to chat history
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+    response = filter_agent(prompt, OPENAI_API)
+    query = response
+
+    response = index.query(
+        vector= embeddings.embed_query(query),
+        # filter= build_filter(json),
+        top_k=5,
+        include_metadata=True
+    )
+    response = reranker(query, response)
+    result_query = 'Original Query:' + query + 'Query Results:' + str(response)
+    assistant_response = result_agent(result_query, OPENAI_API)
 
 ### GPT Response
 # Display assistant response in chat message container
 with st.chat_message("assistant"):
     message_placeholder = st.empty()
     full_response = ""
-    assistant_response = "Hello there! How can I assist you today?"
     # Simulate stream of response with milliseconds delay
     for chunk in assistant_response.split():
         full_response += chunk + " "
         time.sleep(0.05)
         # Add a blinking cursor to simulate typing
         message_placeholder.markdown(full_response + "▌")
-    message_placeholder.markdown(final_response)
+    message_placeholder.markdown(full_response)
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": final_response})
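
For orientation, below is a minimal, self-contained sketch of the chat flow this hunk introduces. The helper names (filter_agent, reranker, result_agent) and the index, embeddings, and OPENAI_API objects come from elsewhere in app.py and are stubbed here as placeholders so the snippet runs on its own; it illustrates the pattern, not the repo's actual implementation. The sketch also appends full_response to the chat history, since final_response is not defined anywhere in this hunk.

# sketch_chat_flow.py - hypothetical standalone sketch; run with `streamlit run sketch_chat_flow.py`
import time

import streamlit as st

OPENAI_API = "sk-placeholder"  # stand-in for the real key loaded elsewhere in app.py


def filter_agent(prompt: str, api_key: str) -> str:
    """Stub: the real helper presumably rewrites the chat prompt into a search query."""
    return prompt


def reranker(query: str, matches: list) -> list:
    """Stub: the real helper presumably reorders retrieved matches by relevance."""
    return matches


def result_agent(result_query: str, api_key: str) -> str:
    """Stub: the real helper presumably asks an LLM to turn the results into an answer."""
    return "Here are a few classes that look like a good fit."


if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay prior turns so the conversation survives Streamlit reruns.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What kind of class are you looking for?"):
    # Echo the user's turn and store it in the chat history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Retrieval pipeline from the diff: rewrite -> embed -> vector search -> rerank -> answer.
    query = filter_agent(prompt, OPENAI_API)
    # In app.py this is a Pinecone call:
    # matches = index.query(vector=embeddings.embed_query(query), top_k=5, include_metadata=True)
    matches = []  # placeholder result
    matches = reranker(query, matches)
    result_query = "Original Query:" + query + "Query Results:" + str(matches)
    assistant_response = result_agent(result_query, OPENAI_API)

    # Stream the answer word by word, then persist the full text.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        for chunk in assistant_response.split():
            full_response += chunk + " "
            time.sleep(0.05)
            message_placeholder.markdown(full_response + "▌")  # blinking-cursor effect
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})

The commented-out filter= build_filter(json) argument, if enabled, would pass a Pinecone metadata filter dict to index.query so that only matching class records are searched; the shape of that dict depends on build_filter, which is not shown in this hunk.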