awacke1 committed
Commit 6db0818
1 Parent(s): 0876d8e

Update app.py

Files changed (1): app.py +42 -86
app.py CHANGED
@@ -182,49 +182,39 @@ def process_text(user_name, text_input, selected_model, temp_values):
     st.markdown(f"{user_name} ({timestamp}): {text_input}")

     with st.chat_message("Assistant"):
-        try:
-            if selected_model == "GPT-4o":
-                completion = client.chat.completions.create(
-                    model=GPT4O_MODEL,
-                    messages=[
-                        {"role": "user", "content": m["message"]}
-                        for m in st.session_state.messages
-                    ],
-                    stream=True,
-                    temperature=temp_values
-                )
-                return_text = st.write_stream(completion)
-            else:
-                messages = [
-                    {"content": m["message"]}
+        if selected_model == "GPT-4o":
+            completion = client.chat.completions.create(
+                model=GPT4O_MODEL,
+                messages=[
+                    {"role": "user", "content": m["message"]}
                     for m in st.session_state.messages
-                ]
+                ],
+                stream=True,
+                temperature=temp_values
+            )
+            return_text = st.write_stream(completion)
+        else:
+            try:
                 stream = hf_client.chat.completions.create(
                     model=model_links[selected_model],
-                    messages=messages,
+                    messages=[
+                        {"role": m["role"], "content": m["content"]}
+                        for m in st.session_state.messages
+                    ],
                     temperature=temp_values,
                     stream=True,
                     max_tokens=3000,
                 )
                 return_text = st.write_stream(stream)
-        except openai.APIError as e:
-            return_text = f"OpenAI API Error: {str(e)}"
-            st.error(return_text)
-        except requests.exceptions.RequestException as e:
-            return_text = f"Network Error: {str(e)}"
-            st.error(return_text)
-        except Exception as e:
-            return_text = f"Unexpected Error: {str(e)}"
-            st.error(return_text)
+            except Exception as e:
+                return_text = f"Error: {str(e)}"
+                st.error(return_text)

-    if not return_text.startswith("Error:"):
-        st.markdown(f"Assistant ({timestamp}): {return_text}")
-        filename = generate_filename(text_input, "md")
-        create_file(filename, text_input, return_text, user_name, timestamp)
-        st.session_state.messages.append({"user": "Assistant", "message": return_text, "timestamp": timestamp})
-        save_data()
-
-    return return_text
+    st.markdown(f"Assistant ({timestamp}): {return_text}")
+    filename = generate_filename(text_input, "md")
+    create_file(filename, text_input, return_text, user_name, timestamp)
+    st.session_state.messages.append({"user": "Assistant", "message": return_text, "timestamp": timestamp})
+    save_data()

 # Function to process image (using GPT-4o)
 def process_image(user_name, image_input, user_prompt):
@@ -276,7 +266,6 @@ def process_audio(user_name, audio_input, text_input):
     create_file(filename, text_input, transcription.text, user_name, timestamp)
     st.session_state.messages.append({"user": "Assistant", "message": transcription.text, "timestamp": timestamp})
     save_data()
-    return transcription.text

 # Function to process video (using GPT-4o)
 def process_video(user_name, video_input, user_prompt):
@@ -307,45 +296,6 @@ def process_video(user_name, video_input, user_prompt):
     save_data()
     return video_response

-# Callback function for text processing
-def process_text_callback(column_name):
-    text_input = st.session_state[f"{column_name}_text"]
-    selected_model = st.session_state[f"{column_name}_model"]
-    temp_values = st.session_state[f"{column_name}_temp"]
-    if text_input:
-        process_text(st.session_state.current_user['name'], text_input, selected_model, temp_values)
-        st.session_state[f"{column_name}_text"] = ""  # Clear the input after processing
-
-# Callback function for image processing
-def process_image_callback(column_name):
-    text_input = st.session_state[f"{column_name}_image_text"]
-    uploaded_files = st.session_state[f"{column_name}_image_upload"]
-    if text_input and uploaded_files:
-        for image_input in uploaded_files:
-            image_bytes = image_input.read()
-            process_image(st.session_state.current_user['name'], image_bytes, text_input)
-        st.session_state[f"{column_name}_image_text"] = ""  # Clear the input after processing
-        st.session_state[f"{column_name}_image_upload"] = None  # Clear the file uploader
-
-# Callback function for audio processing
-def process_audio_callback(column_name):
-    text_input = st.session_state[f"{column_name}_audio_text"]
-    uploaded_files = st.session_state[f"{column_name}_audio_upload"]
-    if uploaded_files:
-        for audio_input in uploaded_files:
-            process_audio(st.session_state.current_user['name'], audio_input, text_input)
-        st.session_state[f"{column_name}_audio_text"] = ""  # Clear the input after processing
-        st.session_state[f"{column_name}_audio_upload"] = None  # Clear the file uploader
-
-# Callback function for video processing
-def process_video_callback(column_name):
-    text_input = st.session_state[f"{column_name}_video_text"]
-    video_input = st.session_state[f"{column_name}_video_upload"]
-    if video_input and text_input:
-        process_video(st.session_state.current_user['name'], video_input, text_input)
-        st.session_state[f"{column_name}_video_text"] = ""  # Clear the input after processing
-        st.session_state[f"{column_name}_video_upload"] = None  # Clear the file uploader
-
 # Main function for each column
 def main_column(column_name):
     st.markdown(f"##### {column_name}")
@@ -355,20 +305,25 @@ def main_column(column_name):
     option = st.selectbox(f"Select an option for {column_name}", ("Text", "Image", "Audio", "Video"), key=f"{column_name}_option")

     if option == "Text":
-        st.text_input(f"Enter your text for {column_name}:", key=f"{column_name}_text")
-        st.button(f"Process Text for {column_name}", on_click=process_text_callback, args=(column_name,))
+        text_input = st.text_input(f"Enter your text for {column_name}:", key=f"{column_name}_text")
+        if text_input:
+            process_text(st.session_state.current_user['name'], text_input, selected_model, temp_values)
     elif option == "Image":
-        st.text_input(f"Enter text prompt to use with Image context for {column_name}:", key=f"{column_name}_image_text")
-        st.file_uploader(f"Upload images for {column_name}", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key=f"{column_name}_image_upload")
-        st.button(f"Process Image for {column_name}", on_click=process_image_callback, args=(column_name,))
+        text_input = st.text_input(f"Enter text prompt to use with Image context for {column_name}:", key=f"{column_name}_image_text")
+        uploaded_files = st.file_uploader(f"Upload images for {column_name}", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key=f"{column_name}_image_upload")
+        for image_input in uploaded_files:
+            image_bytes = image_input.read()
+            process_image(st.session_state.current_user['name'], image_bytes, text_input)
     elif option == "Audio":
-        st.text_input(f"Enter text prompt to use with Audio context for {column_name}:", key=f"{column_name}_audio_text")
-        st.file_uploader(f"Upload an audio file for {column_name}", type=["mp3", "wav"], accept_multiple_files=True, key=f"{column_name}_audio_upload")
-        st.button(f"Process Audio for {column_name}", on_click=process_audio_callback, args=(column_name,))
+        text_input = st.text_input(f"Enter text prompt to use with Audio context for {column_name}:", key=f"{column_name}_audio_text")
+        uploaded_files = st.file_uploader(f"Upload an audio file for {column_name}", type=["mp3", "wav"], accept_multiple_files=True, key=f"{column_name}_audio_upload")
+        for audio_input in uploaded_files:
+            process_audio(st.session_state.current_user['name'], audio_input, text_input)
     elif option == "Video":
-        st.file_uploader(f"Upload a video file for {column_name}", type=["mp4"], key=f"{column_name}_video_upload")
-        st.text_input(f"Enter text prompt to use with Video context for {column_name}:", key=f"{column_name}_video_text")
-        st.button(f"Process Video for {column_name}", on_click=process_video_callback, args=(column_name,))
+        video_input = st.file_uploader(f"Upload a video file for {column_name}", type=["mp4"], key=f"{column_name}_video_upload")
+        text_input = st.text_input(f"Enter text prompt to use with Video context for {column_name}:", key=f"{column_name}_video_text")
+        if video_input and text_input:
+            process_video(st.session_state.current_user['name'], video_input, text_input)

 # Main Streamlit app
 st.title("Personalized Real-Time Chat")
@@ -409,4 +364,5 @@ with col2:

 # Run the Streamlit app
 if __name__ == "__main__":
-    st.markdown("\n[by Aaron Wacker](https://huggingface.co/spaces/awacke1/ChatStreamlitMultiplayer).")
+    st.markdown("*by Aaron Wacker*")
+    st.markdown("\n[Aaron Wacker](https://huggingface.co/spaces/awacke1/).")
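A few notes on the new code in this commit. First, the rewritten Hugging Face branch builds its request from m["role"] and m["content"], but everywhere else in this diff chat history is stored as {"user": ..., "message": ..., "timestamp": ...}: the GPT-4o branch reads m["message"], and the append below the try block writes "user"/"message" keys. If that stored shape is accurate, the new comprehension raises a KeyError on the first call, which the narrowed except block then surfaces as an "Error: ..." string. A minimal sketch of an adapter for the stored format shown in the diff; to_api_messages is a hypothetical helper, not part of the commit:

```python
# Hypothetical helper (not in this commit): convert this app's stored chat
# entries, {"user": ..., "message": ..., "timestamp": ...}, into the
# {"role": ..., "content": ...} shape that chat-completions APIs expect.
def to_api_messages(stored_messages):
    api_messages = []
    for m in stored_messages:
        # Entries recorded under "Assistant" become assistant turns;
        # everything else is treated as a user turn.
        role = "assistant" if m.get("user") == "Assistant" else "user"
        api_messages.append({"role": role, "content": m["message"]})
    return api_messages
```

Both branches could then pass messages=to_api_messages(st.session_state.messages) instead of building the list inline.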
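Second, the new Image and Audio branches iterate the return value of st.file_uploader directly. Until the user uploads something, that value is None or an empty list, depending on the Streamlit version and the accept_multiple_files setting, and iterating None raises a TypeError on every rerun. A defensive variant of the Image branch, assuming the same main_column context as the diff (column_name, text_input, and process_image come from the surrounding code):

```python
# Defensive variant of the new Image branch: guard against st.file_uploader
# returning None before any upload has happened.
uploaded_files = st.file_uploader(
    f"Upload images for {column_name}",
    type=["png", "jpg", "jpeg"],
    accept_multiple_files=True,
    key=f"{column_name}_image_upload",
)
for image_input in uploaded_files or []:  # `or []` also covers a None return
    image_bytes = image_input.read()
    process_image(st.session_state.current_user['name'], image_bytes, text_input)
```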
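Finally, the commit drops the on_click callbacks, which cleared st.session_state[f"{column_name}_text"] and the uploader keys after each submission, in favor of unconditional inline checks. Streamlit reruns the whole script on each widget interaction and st.text_input keeps returning its current value, so `if text_input:` will re-submit the same prompt whenever anything else on the page changes. A minimal rerun guard under that execution model, again assuming the diff's main_column context; the {column_name}_last_text session-state key is a hypothetical addition for illustration:

```python
# Sketch of a rerun guard (hypothetical; not in this commit). Streamlit
# reruns the script top to bottom on each interaction, so remember the
# last prompt that was processed and skip it on subsequent reruns.
text_input = st.text_input(f"Enter your text for {column_name}:", key=f"{column_name}_text")
already_handled = st.session_state.get(f"{column_name}_last_text") == text_input
if text_input and not already_handled:
    process_text(st.session_state.current_user['name'], text_input, selected_model, temp_values)
    st.session_state[f"{column_name}_last_text"] = text_input  # mark as processed
```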