Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -97,24 +97,18 @@ for message in st.session_state.messages:
 if "is_streaming" not in st.session_state:
     st.session_state.is_streaming = False
 
-#
-if "is_streaming" not in st.session_state:
-    st.session_state.is_streaming = False # Indicate if streaming/processing
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-# Prevent input during streaming
+# Chat input handling
 if st.session_state.is_streaming:
-    st.
+    st.chat_input("The assistant is currently responding. Please wait...") # Inform the user to wait
 else:
+    # If not streaming, allow user input
     if prompt := st.chat_input("Ask me anything about diabetes"):
-        st.session_state.is_streaming = True #
+        st.session_state.is_streaming = True # Set the flag to indicate streaming has started
 
-        # Display user message
         with st.chat_message("user"):
             st.markdown(prompt)
 
-#
+        # Add the user message to chat history
         st.session_state.messages.append({"role": "user", "content": prompt})
 
         instructions = """
@@ -130,25 +124,30 @@ else:
         You will answer as if you are talking to a patient directly
         """
 
-        # Full prompt
         full_prompt = f"<s>[INST] {prompt} [/INST] {instructions}</s>"
 
-#
+        # Display assistant response in chat message container
         with st.chat_message("assistant"):
+            # Stream the response
             stream = client.chat.completions.create(
                 model=model_links[selected_model],
                 messages=[
-                    {"role": "
-
+                    {"role": m["role"], "content": full_prompt}
+                    for m in st.session_state.messages
                 ],
-                stream=True,
                 temperature=temp_values,
+                stream=True,
                 max_tokens=1024,
             )
+            response = st.write_stream(stream)
+
+            # Process and clean the response
+            response = response.replace('</s>', '').strip() # Clean unnecessary characters
 
-            response = st.write_stream(stream).replace("</s>", "").strip()
             st.markdown(response)
 
-
-
-
+            # Indicate that streaming is complete
+            st.session_state.is_streaming = False
+
+            # Store the final response
+            st.session_state.messages.append({"role": "assistant", "content": response})
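For reference, here is a minimal self-contained sketch of the input-locking pattern this commit introduces. A hypothetical generate_reply() generator stands in for the client.chat.completions.create call and model setup from app.py, and the disabled flag passed to st.chat_input is an optional variation on the placeholder-only approach used in the diff:

import streamlit as st

# Hypothetical stand-in for the streaming client call in app.py.
def generate_reply(prompt: str):
    for word in f"You asked about: {prompt}".split():
        yield word + " "

if "messages" not in st.session_state:
    st.session_state.messages = []
if "is_streaming" not in st.session_state:
    st.session_state.is_streaming = False  # True only while a reply is being generated

if st.session_state.is_streaming:
    # Show the input but keep it locked while a response is in flight.
    st.chat_input("The assistant is currently responding. Please wait...", disabled=True)
else:
    if prompt := st.chat_input("Ask me anything about diabetes"):
        st.session_state.is_streaming = True  # Acquire the lock before streaming

        with st.chat_message("user"):
            st.markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.chat_message("assistant"):
            # st.write_stream renders chunks as they arrive and returns the full text
            response = st.write_stream(generate_reply(prompt))

        st.session_state.messages.append({"role": "assistant", "content": response})
        st.session_state.is_streaming = False  # Release the lock once streaming finishes

Because Streamlit reruns the script top to bottom on every interaction, the flag only affects reruns triggered while a response is still streaming; it is cleared again at the end of the same run that produced the answer.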