bhulston committed
Commit
009017d
1 Parent(s): 7704f84

Update app.py

Files changed (1)
  1. app.py +31 -14
app.py CHANGED
@@ -60,6 +60,23 @@ for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
+
+### GPT Response
+# Display assistant response in chat message container
+with st.chat_message("assistant"):
+    message_placeholder = st.empty()
+    full_response = ""
+    assistant_response = "How can I help you today?"
+    # Simulate stream of response with milliseconds delay
+    for chunk in assistant_response.split():
+        full_response += chunk + " "
+        time.sleep(0.05)
+        # Add a blinking cursor to simulate typing
+        message_placeholder.markdown(full_response + "▌")
+    message_placeholder.markdown(full_response)
+# Add assistant response to chat history
+st.session_state.messages.append({"role": "assistant", "content": final_response})
+
 if prompt := st.chat_input("What kind of class are you looking for?"):
     # Display user message in chat message container
     with st.chat_message("user"):
@@ -80,17 +97,17 @@ if prompt := st.chat_input("What kind of class are you looking for?"):
     result_query = 'Original Query:' + query + 'Query Results:' + str(response)
     assistant_response = result_agent(result_query, OPENAI_API)
 
-    ### GPT Response
-    # Display assistant response in chat message container
-    with st.chat_message("assistant"):
-        message_placeholder = st.empty()
-        full_response = ""
-        # Simulate stream of response with milliseconds delay
-        for chunk in assistant_response.split():
-            full_response += chunk + " "
-            time.sleep(0.05)
-            # Add a blinking cursor to simulate typing
-            message_placeholder.markdown(full_response + "▌")
-        message_placeholder.markdown(full_response)
-    # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": final_response})
+    if assistant_response:
+        with st.chat_message("assistant"):
+            message_placeholder = st.empty()
+            full_response = ""
+            # Simulate stream of response with milliseconds delay
+            for chunk in assistant_response.split():
+                full_response += chunk + " "
+                time.sleep(0.05)
+                # Add a blinking cursor to simulate typing
+                message_placeholder.markdown(full_response + "▌")
+            message_placeholder.markdown(full_response)
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": final_response})
+
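
For context, the commit does two things: it adds a one-off greeting block above the chat input, and it wraps the assistant-turn rendering in an `if assistant_response:` guard so nothing is shown or stored when the agent returns an empty result. The sketch below shows that resulting pattern in isolation, assuming only the standard Streamlit chat API (`st.chat_input`, `st.chat_message`, `st.session_state`, `st.empty`). The `result_agent(result_query, OPENAI_API)` call and the user-turn bookkeeping from the rest of app.py are stubbed with placeholders, and the history append uses `full_response`, since the `final_response` name in the diff is not defined at that point.

```python
import time
import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the chat history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What kind of class are you looking for?"):
    # Show and store the user turn.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Stand-in for assistant_response = result_agent(result_query, OPENAI_API) in the real app.
    assistant_response = "How can I help you today?"

    # Only render and store an assistant turn when a response actually came back.
    if assistant_response:
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            # Simulate streaming by revealing one word at a time.
            for chunk in assistant_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                message_placeholder.markdown(full_response + "▌")  # blinking cursor
            message_placeholder.markdown(full_response)
        # Append the completed text (the diff's final_response is undefined here).
        st.session_state.messages.append({"role": "assistant", "content": full_response})
```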