oceansweep committed on
Commit
469948b
1 Parent(s): fb85b24

Upload Gradio_Related.py

Browse files
Files changed (1) hide show
  1. App_Function_Libraries/Gradio_Related.py +717 -527
App_Function_Libraries/Gradio_Related.py CHANGED
@@ -23,19 +23,20 @@ import shutil
23
  import tempfile
24
  import uuid
25
  import zipfile
26
- from datetime import datetime, time
27
  import json
28
  import logging
29
  import os.path
30
  from pathlib import Path
31
  import sqlite3
 
32
  from typing import Dict, List, Tuple, Optional
33
  import traceback
34
  from functools import wraps
35
 
36
- import pypandoc
37
  #
38
  # Import 3rd-Party Libraries
 
39
  import yt_dlp
40
  import gradio as gr
41
  #
@@ -43,10 +44,12 @@ import gradio as gr
43
  from App_Function_Libraries.Article_Summarization_Lib import scrape_and_summarize_multiple
44
  from App_Function_Libraries.Audio_Files import process_audio_files, process_podcast, download_youtube_audio
45
  from App_Function_Libraries.Chunk_Lib import improved_chunking_process
46
- from App_Function_Libraries.PDF_Ingestion_Lib import process_and_cleanup_pdf
 
47
  from App_Function_Libraries.Local_LLM_Inference_Engine_Lib import local_llm_gui_function
48
  from App_Function_Libraries.Local_Summarization_Lib import summarize_with_llama, summarize_with_kobold, \
49
- summarize_with_oobabooga, summarize_with_tabbyapi, summarize_with_vllm, summarize_with_local_llm
 
50
  from App_Function_Libraries.Summarization_General_Lib import summarize_with_openai, summarize_with_cohere, \
51
  summarize_with_anthropic, summarize_with_groq, summarize_with_openrouter, summarize_with_deepseek, \
52
  summarize_with_huggingface, perform_summarization, save_transcription_and_summary, \
@@ -57,9 +60,11 @@ from App_Function_Libraries.SQLite_DB import update_media_content, list_prompts,
57
  delete_chat_message, update_chat_message, add_chat_message, get_chat_messages, search_chat_conversations, \
58
  create_chat_conversation, save_chat_history_to_database, view_database, get_transcripts, get_trashed_items, \
59
  user_delete_item, empty_trash, create_automated_backup, backup_dir, db_path, add_or_update_prompt, \
60
- load_prompt_details, load_preset_prompts, insert_prompt_to_db, delete_prompt, search_and_display_items
 
61
  from App_Function_Libraries.Utils import sanitize_filename, extract_text_from_segments, create_download_directory, \
62
- convert_to_seconds, load_comprehensive_config, safe_read_file, downloaded_files
 
63
  from App_Function_Libraries.Video_DL_Ingestion_Lib import parse_and_expand_urls, \
64
  generate_timestamped_url, extract_metadata, download_video
65
 
@@ -68,8 +73,6 @@ from App_Function_Libraries.Video_DL_Ingestion_Lib import parse_and_expand_urls,
68
  # Function Definitions
69
  #
70
 
71
- # I know this is bad, I don't care, this key is set to expire on Aug 19. Until then, it is what it is.
72
- MISTRAL_TOKEN = "p3hw1VRckQl86OjeOtvaOckMfAaernxz"
73
  whisper_models = ["small", "medium", "small.en", "medium.en", "medium", "large", "large-v1", "large-v2", "large-v3",
74
  "distil-large-v2", "distil-medium.en", "distil-small.en"]
75
  custom_prompt_input = None
@@ -640,7 +643,23 @@ def create_video_transcription_tab():
640
  visible=False)
641
  with gr.Row():
642
  system_prompt_input = gr.Textbox(label="System Prompt",
643
- value="You are a professional summarizer. Please summarize this video transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
644
  lines=3,
645
  visible=False,
646
  interactive=True)
@@ -670,13 +689,9 @@ def create_video_transcription_tab():
670
 
671
  api_name_input = gr.Dropdown(
672
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
673
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
674
- value="Mistral",
675
- label="API Name (Mandatory)",
676
- )
677
- api_key_input = gr.Textbox(label="API Key (Mandatory)", placeholder="Enter your API key here",
678
- type="password",
679
- value=MISTRAL_TOKEN)
680
  keywords_input = gr.Textbox(label="Keywords", placeholder="Enter keywords here (comma-separated)",
681
  value="default,no_keyword_set")
682
  batch_size_input = gr.Slider(minimum=1, maximum=10, value=1, step=1,
@@ -1008,7 +1023,8 @@ def create_video_transcription_tab():
1008
  if url_input:
1009
  inputs.extend([url.strip() for url in url_input.split('\n') if url.strip()])
1010
  if video_file is not None:
1011
- inputs.append(video_file.name) # Assuming video_file is a file object with a 'name' attribute
 
1012
 
1013
  if not inputs:
1014
  raise ValueError("No input provided. Please enter URLs or upload a video file.")
@@ -1068,9 +1084,10 @@ def create_video_transcription_tab():
1068
  # Handle URL or local file
1069
  if os.path.isfile(input_item):
1070
  video_file_path = input_item
 
1071
  # Extract basic info from local file
1072
  info_dict = {
1073
- 'webpage_url': input_item,
1074
  'title': os.path.basename(input_item),
1075
  'description': "Local file",
1076
  'channel_url': None,
@@ -1291,7 +1308,23 @@ def create_audio_processing_tab():
1291
  visible=False)
1292
  with gr.Row():
1293
  system_prompt_input = gr.Textbox(label="System Prompt",
1294
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1295
  lines=3,
1296
  visible=False)
1297
 
@@ -1321,13 +1354,11 @@ def create_audio_processing_tab():
1321
 
1322
  api_name_input = gr.Dropdown(
1323
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1324
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
1325
- value="Mistral",
1326
  label="API for Summarization (Optional)"
1327
  )
1328
- api_key_input = gr.Textbox(label="API Key (if required)", placeholder="Enter your API key here",
1329
- type="password",
1330
- value=MISTRAL_TOKEN)
1331
  custom_keywords_input = gr.Textbox(label="Custom Keywords", placeholder="Enter custom keywords, comma-separated")
1332
  keep_original_input = gr.Checkbox(label="Keep original audio file", value=False)
1333
 
@@ -1401,7 +1432,23 @@ def create_podcast_tab():
1401
  visible=False)
1402
  with gr.Row():
1403
  system_prompt_input = gr.Textbox(label="System Prompt",
1404
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1405
  lines=3,
1406
  visible=False)
1407
 
@@ -1431,13 +1478,11 @@ def create_podcast_tab():
1431
 
1432
  podcast_api_name_input = gr.Dropdown(
1433
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp",
1434
- "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
1435
- value="Mistral",
1436
  label="API Name for Summarization (Optional)"
1437
  )
1438
- podcast_api_key_input = gr.Textbox(label="API Key (if required)",
1439
- type="password",
1440
- value=MISTRAL_TOKEN)
1441
  podcast_whisper_model_input = gr.Dropdown(choices=whisper_models, value="medium", label="Whisper Model")
1442
 
1443
  keep_original_input = gr.Checkbox(label="Keep original audio file", value=False)
@@ -1525,7 +1570,23 @@ def create_website_scraping_tab():
1525
  visible=False)
1526
  with gr.Row():
1527
  system_prompt_input = gr.Textbox(label="System Prompt",
1528
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1529
  lines=3,
1530
  visible=False)
1531
 
@@ -1555,14 +1616,9 @@ def create_website_scraping_tab():
1555
 
1556
  api_name_input = gr.Dropdown(
1557
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1558
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
1559
- value="Mistral",
1560
- label="API Name (Mandatory for Summarization)"
1561
- )
1562
  api_key_input = gr.Textbox(label="API Key (Mandatory if API Name is specified)",
1563
- placeholder="Enter your API key here; Ignore if using Local API or Built-in API",
1564
- type="password",
1565
- value=MISTRAL_TOKEN)
1566
  keywords_input = gr.Textbox(label="Keywords", placeholder="Enter keywords here (comma-separated)",
1567
  value="default,no_keyword_set", visible=True)
1568
 
@@ -1607,7 +1663,24 @@ def create_pdf_ingestion_tab():
1607
  visible=False)
1608
  with gr.Row():
1609
  system_prompt_input = gr.Textbox(label="System Prompt",
1610
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1611
  lines=3,
1612
  visible=False)
1613
 
@@ -1646,6 +1719,52 @@ def create_pdf_ingestion_tab():
1646
  inputs=[pdf_file_input, pdf_title_input, pdf_author_input, pdf_keywords_input],
1647
  outputs=pdf_result_output
1648
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1649
  #
1650
  #
1651
  ################################################################################################################
@@ -1669,14 +1788,9 @@ def create_resummary_tab():
1669
  with gr.Row():
1670
  api_name_input = gr.Dropdown(
1671
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1672
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
1673
- value="Mistral",
1674
- label="API Name"
1675
- )
1676
- api_key_input = gr.Textbox(label="API Key",
1677
- placeholder="Enter your API key here",
1678
- type="password",
1679
- value=MISTRAL_TOKEN)
1680
 
1681
  chunking_options_checkbox = gr.Checkbox(label="Use Chunking", value=False)
1682
  with gr.Row(visible=False) as chunking_options_box:
@@ -1703,7 +1817,23 @@ def create_resummary_tab():
1703
  visible=False)
1704
  with gr.Row():
1705
  system_prompt_input = gr.Textbox(label="System Prompt",
1706
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1707
  lines=3,
1708
  visible=False)
1709
 
@@ -1774,17 +1904,32 @@ def update_resummarize_dropdown(search_query, search_type):
1774
 
1775
  item_options = [f"{item[1]} ({item[2]})" for item in results]
1776
  item_mapping = {f"{item[1]} ({item[2]})": item[0] for item in results}
 
 
1777
  return gr.update(choices=item_options), item_mapping
1778
 
1779
 
1780
- def resummarize_content_wrapper(selected_item, item_mapping, api_name, api_key, chunking_options_checkbox, chunk_method,
1781
- max_chunk_size, chunk_overlap, custom_prompt_checkbox, custom_prompt):
1782
- if not selected_item or not api_name or not api_key:
 
 
 
1783
  return "Please select an item and provide API details."
1784
 
 
 
 
 
 
 
 
 
 
 
1785
  media_id = item_mapping.get(selected_item)
1786
  if not media_id:
1787
- return "Invalid selection."
1788
 
1789
  content, old_prompt, old_summary = fetch_item_details(media_id)
1790
 
@@ -1794,8 +1939,8 @@ def resummarize_content_wrapper(selected_item, item_mapping, api_name, api_key,
1794
  # Prepare chunking options
1795
  chunk_options = {
1796
  'method': chunk_method,
1797
- 'max_size': int(max_chunk_size),
1798
- 'overlap': int(chunk_overlap),
1799
  'language': 'english',
1800
  'adaptive': True,
1801
  'multi_level': False,
@@ -1804,49 +1949,43 @@ def resummarize_content_wrapper(selected_item, item_mapping, api_name, api_key,
1804
  # Prepare summarization prompt
1805
  summarization_prompt = custom_prompt if custom_prompt_checkbox and custom_prompt else None
1806
 
1807
- # Call the resummary_content function
1808
- result = resummarize_content(media_id, content, api_name, api_key, chunk_options, summarization_prompt)
 
1809
 
1810
  return result
1811
 
1812
 
1813
- def resummarize_content(selected_item, item_mapping, api_name, api_key, chunking_options_checkbox, chunk_method, max_chunk_size, chunk_overlap, custom_prompt_checkbox, custom_prompt):
1814
- if not selected_item or not api_name or not api_key:
1815
- return "Please select an item and provide API details."
1816
-
1817
- media_id = item_mapping.get(selected_item)
1818
- if not media_id:
1819
- return "Invalid selection."
1820
-
1821
- content, old_prompt, old_summary = fetch_item_details(media_id)
1822
-
1823
- if not content:
1824
- return "No content available for re-summarization."
1825
-
1826
  # Load configuration
1827
  config = load_comprehensive_config()
1828
 
1829
- # Prepare chunking options
1830
- chunk_options = {
1831
- 'method': chunk_method,
1832
- 'max_size': int(max_chunk_size),
1833
- 'overlap': int(chunk_overlap),
1834
- 'language': 'english',
1835
- 'adaptive': True,
1836
- 'multi_level': False,
1837
- }
1838
-
1839
  # Chunking logic
1840
- if chunking_options_checkbox:
1841
  chunks = improved_chunking_process(content, chunk_options)
1842
  else:
1843
  chunks = [{'text': content, 'metadata': {}}]
1844
 
1845
- # Prepare summarization prompt
1846
- if custom_prompt_checkbox and custom_prompt:
1847
- summarization_prompt = custom_prompt
1848
- else:
1849
- summarization_prompt = config.get('Prompts', 'default_summary_prompt', fallback="Summarize the following text:")
 
 
 
 
 
 
 
 
 
 
 
 
 
1850
 
1851
  # Summarization logic
1852
  summaries = []
@@ -1868,18 +2007,180 @@ def resummarize_content(selected_item, item_mapping, api_name, api_key, chunking
1868
  new_summary = " ".join(summaries)
1869
 
1870
  # Update the database with the new summary
 
1871
  try:
1872
  update_result = update_media_content(selected_item, item_mapping, content, summarization_prompt, new_summary)
1873
  if "successfully" in update_result.lower():
1874
- return f"Re-summarization complete. New summary: {new_summary[:500]}..."
1875
  else:
1876
  return f"Error during database update: {update_result}"
1877
  except Exception as e:
1878
  logging.error(f"Error updating database: {str(e)}")
1879
  return f"Error updating database: {str(e)}"
1880
 
 
1881
  # End of Re-Summarization Functions
1882
  #
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1883
  ############################################################################################################################################################################################################################
1884
  #
1885
  # Transcript Comparison Tab
@@ -1964,9 +2265,6 @@ def create_compare_transcripts_tab():
1964
 
1965
  ### End of under construction section
1966
 
1967
-
1968
-
1969
-
1970
  #
1971
  #
1972
  ###########################################################################################################################################################################################################################
@@ -2475,7 +2773,7 @@ def create_llamafile_settings_tab():
2475
  if current_dir_model:
2476
  return current_dir_model
2477
  elif parent_dir_model:
2478
- return os.path.join("../App_Function_Libraries", parent_dir_model)
2479
  else:
2480
  return ""
2481
 
@@ -2531,9 +2829,6 @@ def create_llamafile_advanced_inputs():
2531
  # Chat Interface Tab Functions
2532
 
2533
 
2534
-
2535
-
2536
- # FIXME - not adding content from selected item to query
2537
  def chat(message, history, media_content, selected_parts, api_endpoint, api_key, prompt, temperature,
2538
  system_message=None):
2539
  try:
@@ -2556,9 +2851,9 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,
2556
 
2557
  # Prepare the input for the API
2558
  if not history:
2559
- input_data = f"{combined_content}\n\nUser: {message}\nAI:"
2560
  else:
2561
- input_data = f"User: {message}\nAI:"
2562
  # Print first 500 chars
2563
  logging.info(f"Debug - Chat Function - Input Data: {input_data[:500]}...")
2564
 
@@ -2595,11 +2890,13 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,
2595
  elif api_endpoint.lower() == "tabbyapi":
2596
  response = summarize_with_tabbyapi(input_data, prompt, temp, system_message)
2597
  elif api_endpoint.lower() == "vllm":
2598
- response = summarize_with_vllm(input_data, prompt, temp, system_message)
2599
  elif api_endpoint.lower() == "local-llm":
2600
  response = summarize_with_local_llm(input_data, prompt, temp, system_message)
2601
  elif api_endpoint.lower() == "huggingface":
2602
  response = summarize_with_huggingface(api_key, input_data, prompt, temp, system_message)
 
 
2603
  else:
2604
  raise ValueError(f"Unsupported API endpoint: {api_endpoint}")
2605
 
@@ -2611,41 +2908,100 @@ def chat(message, history, media_content, selected_parts, api_endpoint, api_key,
2611
 
2612
 
2613
  def save_chat_history_to_db_wrapper(chatbot, conversation_id, media_content):
2614
- logging.info(f"Attempting to save chat history. Media content: {media_content}")
2615
  try:
2616
- # Extract the media_id from the media_content
2617
  media_id = None
2618
- if isinstance(media_content, dict) and 'content' in media_content:
2619
- try:
2620
- content_json = json.loads(media_content['content'])
2621
- # Use the webpage_url as the media_id
2622
- media_id = content_json.get('webpage_url')
2623
- except json.JSONDecodeError:
2624
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2625
 
2626
  if media_id is None:
2627
  # If we couldn't find a media_id, we'll use a placeholder
2628
  media_id = "unknown_media"
2629
  logging.warning(f"Unable to extract media_id from media_content. Using placeholder: {media_id}")
2630
 
 
 
 
 
2631
  # Generate a unique conversation name using media_id and current timestamp
2632
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
2633
  conversation_name = f"Chat_{media_id}_{timestamp}"
2634
 
2635
- new_conversation_id = save_chat_history_to_database(chatbot, conversation_id, media_id, conversation_name)
2636
  return new_conversation_id, f"Chat history saved successfully as {conversation_name}!"
2637
  except Exception as e:
2638
  error_message = f"Failed to save chat history: {str(e)}"
2639
- logging.error(error_message)
2640
  return conversation_id, error_message
2641
 
2642
 
2643
- def save_chat_history(history, conversation_id):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2644
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
2645
- filename = f"chat_history_{conversation_id}_{timestamp}.json"
 
 
 
 
 
 
 
 
2646
 
2647
  chat_data = {
2648
  "conversation_id": conversation_id,
 
2649
  "timestamp": timestamp,
2650
  "history": [
2651
  {
@@ -2656,17 +3012,25 @@ def save_chat_history(history, conversation_id):
2656
  ]
2657
  }
2658
 
2659
- json_data = json.dumps(chat_data, indent=2)
2660
 
2661
- # Create a temporary file
2662
- with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json') as temp_file:
2663
- temp_file.write(json_data)
2664
- temp_file_path = temp_file.name
2665
 
2666
- return temp_file_path
 
 
 
 
 
 
 
 
2667
 
2668
- json_data = json.dumps(chat_data, indent=2)
2669
- return filename, json_data
 
 
 
 
2670
 
2671
  def show_edit_message(selected):
2672
  if selected:
@@ -2750,6 +3114,11 @@ def update_user_prompt(preset_name):
2750
  return {"title": "", "details": "", "system_prompt": "", "user_prompt": ""}
2751
 
2752
 
 
 
 
 
 
2753
  # FIXME - add additional features....
2754
  def chat_wrapper(message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, conversation_id, save_conversation, temperature, system_prompt, max_tokens=None, top_p=None, frequency_penalty=None, presence_penalty=None, stop_sequence=None):
2755
  try:
@@ -2819,10 +3188,6 @@ def load_conversation(conversation_id):
2819
  return history, conversation_id
2820
 
2821
 
2822
- def clear_chat():
2823
- return gr.update(value=[]), None
2824
-
2825
-
2826
  def update_message_in_chat(message_id, new_text, history):
2827
  update_chat_message(message_id, new_text)
2828
  updated_history = [(msg1, msg2) if msg1[1] != message_id and msg2[1] != message_id
@@ -2873,13 +3238,9 @@ def create_chat_interface():
2873
  with gr.Row():
2874
  load_conversations_btn = gr.Button("Load Selected Conversation")
2875
 
2876
- api_endpoint = gr.Dropdown(label="Select API Endpoint",
2877
- choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
2878
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
2879
- value="Mistral")
2880
- api_key = gr.Textbox(label="API Key (if required)",
2881
- type="password",
2882
- value=MISTRAL_TOKEN)
2883
  custom_prompt_checkbox = gr.Checkbox(label="Use a Custom Prompt",
2884
  value=False,
2885
  visible=True)
@@ -2901,6 +3262,7 @@ def create_chat_interface():
2901
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
2902
  msg = gr.Textbox(label="Enter your message")
2903
  submit = gr.Button("Submit")
 
2904
 
2905
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
2906
  edit_message_text = gr.Textbox(label="Edit Message", visible=False)
@@ -2912,6 +3274,7 @@ def create_chat_interface():
2912
  save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
2913
  save_chat_history_as_file = gr.Button("Save Chat History as File")
2914
  download_file = gr.File(label="Download Chat History")
 
2915
 
2916
  # Restore original functionality
2917
  search_button.click(
@@ -2920,6 +3283,19 @@ def create_chat_interface():
2920
  outputs=[items_output, item_mapping]
2921
  )
2922
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2923
  def update_prompts(preset_name):
2924
  prompts = update_user_prompt(preset_name)
2925
  return (
@@ -2927,6 +3303,13 @@ def create_chat_interface():
2927
  gr.update(value=prompts["system_prompt"], visible=True)
2928
  )
2929
 
 
 
 
 
 
 
 
2930
  preset_prompt.change(
2931
  update_prompts,
2932
  inputs=preset_prompt,
@@ -2953,7 +3336,7 @@ def create_chat_interface():
2953
  inputs=[chatbot],
2954
  outputs=[msg]
2955
  ).then(# Clear the user prompt after the first message
2956
- lambda: gr.update(value=""),
2957
  outputs=[user_prompt, system_prompt_input]
2958
  )
2959
 
@@ -3019,241 +3402,6 @@ def create_chat_interface():
3019
  chatbot.select(show_delete_message, None, [delete_message_id, delete_message_button])
3020
 
3021
 
3022
- def create_chat_interface_editable():
3023
- custom_css = """
3024
- .chatbot-container .message-wrap .message {
3025
- font-size: 14px !important;
3026
- cursor: pointer;
3027
- transition: background-color 0.3s;
3028
- }
3029
- .chatbot-container .message-wrap .message:hover {
3030
- background-color: #f0f0f0;
3031
- }
3032
- .chatbot-container .message-wrap .message.selected-message {
3033
- background-color: #e0e0e0;
3034
- }
3035
- """
3036
-
3037
- custom_js = """
3038
- function selectMessage(el) {
3039
- el.classList.toggle('selected-message');
3040
- updateSelectedMessages();
3041
- }
3042
-
3043
- function updateSelectedMessages() {
3044
- const selectedMessages = document.querySelectorAll('.selected-message');
3045
- const messageIds = Array.from(selectedMessages).map(el => el.dataset.messageId);
3046
- const selectedMessagesInput = document.getElementById('selected_messages');
3047
- selectedMessagesInput.value = JSON.stringify(messageIds);
3048
- selectedMessagesInput.dispatchEvent(new Event('change'));
3049
- }
3050
- """
3051
-
3052
- with gr.TabItem("Remote LLM Chat - Editable"):
3053
- gr.Markdown("# Chat with a designated LLM Endpoint, using your selected item as starting context")
3054
- gr.HTML("<script>" + custom_js + "</script>")
3055
-
3056
- chat_history = gr.State([])
3057
- media_content = gr.State({})
3058
- selected_parts = gr.State([])
3059
- conversation_id = gr.State(None)
3060
-
3061
- with gr.Row():
3062
- with gr.Column(scale=1):
3063
- search_query_input = gr.Textbox(label="Search Query", placeholder="Enter your search query here...")
3064
- search_type_input = gr.Radio(choices=["Title", "URL", "Keyword", "Content"], value="Title",
3065
- label="Search By")
3066
- search_button = gr.Button("Search")
3067
- items_output = gr.Dropdown(label="Select Item", choices=[], interactive=True)
3068
- item_mapping = gr.State({})
3069
- with gr.Row():
3070
- use_content = gr.Checkbox(label="Use Content")
3071
- use_summary = gr.Checkbox(label="Use Summary")
3072
- custom_prompt_checkbox = gr.Checkbox(label="Use a Custom Prompt",
3073
- value=False,
3074
- visible=True)
3075
- save_conversation = gr.Checkbox(label="Save Conversation", value=False, visible=True)
3076
- with gr.Row():
3077
- temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
3078
- with gr.Row():
3079
- conversation_search = gr.Textbox(label="Search Conversations")
3080
- with gr.Row():
3081
- search_conversations_btn = gr.Button("Search Conversations")
3082
- with gr.Row():
3083
- previous_conversations = gr.Dropdown(label="Select Conversation", choices=[], interactive=True)
3084
- with gr.Row():
3085
- load_conversations_btn = gr.Button("Load Selected Conversation")
3086
-
3087
- api_endpoint = gr.Dropdown(label="Select API Endpoint",
3088
- choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
3089
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
3090
- value="Mistral")
3091
- api_key = gr.Textbox(label="API Key (if required)",
3092
- type="password",
3093
- value=MISTRAL_TOKEN)
3094
- # preset_prompt_checkbox = gr.Checkbox(label="Use a pre-set Prompt",
3095
- # value=False,
3096
- # visible=True)
3097
- preset_prompt = gr.Dropdown(label="Select Preset Prompt",
3098
- choices=load_preset_prompts(),
3099
- visible=False)
3100
- custom_prompt_input = gr.Textbox(label="Custom Prompt",
3101
- placeholder="Enter custom prompt here",
3102
- lines=3,
3103
- visible=False)
3104
- system_prompt_input = gr.Textbox(label="System Prompt",
3105
- value="You are a helpful AI assistant.",
3106
- lines=3,
3107
- visible=False)
3108
- with gr.Column():
3109
- chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
3110
- selected_messages = gr.JSON(elem_id="selected_messages", visible=False)
3111
- msg = gr.Textbox(label="Enter your message")
3112
- submit = gr.Button("Submit")
3113
-
3114
- edit_message_text = gr.Textbox(label="Edit Selected Message", visible=True)
3115
- update_message_button = gr.Button("Update Selected Message", visible=True)
3116
-
3117
- delete_message_button = gr.Button("Delete Selected Messages", visible=True)
3118
-
3119
- save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
3120
- save_chat_history_as_file = gr.Button("Save Chat History as File")
3121
- download_file = gr.File(label="Download Chat History")
3122
-
3123
- # Event handlers
3124
- search_button.click(
3125
- fn=update_dropdown,
3126
- inputs=[search_query_input, search_type_input],
3127
- outputs=[items_output, item_mapping]
3128
- )
3129
-
3130
- def update_prompts(preset_name):
3131
- prompts = update_user_prompt(preset_name)
3132
- return (
3133
- gr.update(value=prompts["user_prompt"], visible=True),
3134
- gr.update(value=prompts["system_prompt"], visible=True)
3135
- )
3136
-
3137
- preset_prompt.change(
3138
- update_prompts,
3139
- inputs=preset_prompt,
3140
- outputs=[custom_prompt_input, system_prompt_input]
3141
- )
3142
-
3143
- submit.click(
3144
- chat_wrapper,
3145
- inputs=[msg, chatbot, media_content, selected_parts, api_endpoint, api_key, custom_prompt_input,
3146
- conversation_id, save_conversation, temperature, system_prompt_input],
3147
- outputs=[msg, chatbot, conversation_id]
3148
- ).then(
3149
- lambda x: gr.update(value=""),
3150
- inputs=[chatbot],
3151
- outputs=[msg]
3152
- ).then(
3153
- lambda: gr.update(value=""),
3154
- outputs=[custom_prompt_input, system_prompt_input]
3155
- )
3156
-
3157
- items_output.change(
3158
- update_chat_content,
3159
- inputs=[items_output, use_content, use_summary, custom_prompt_input, item_mapping],
3160
- outputs=[media_content, selected_parts]
3161
- )
3162
- use_content.change(update_selected_parts, inputs=[use_content, use_summary, custom_prompt_input],
3163
- outputs=[selected_parts])
3164
- use_summary.change(update_selected_parts, inputs=[use_content, use_summary, custom_prompt_input],
3165
- outputs=[selected_parts])
3166
- custom_prompt_input.change(update_selected_parts, inputs=[use_content, use_summary, custom_prompt_input],
3167
- outputs=[selected_parts])
3168
- items_output.change(debug_output, inputs=[media_content, selected_parts], outputs=[])
3169
-
3170
- search_conversations_btn.click(
3171
- search_conversations,
3172
- inputs=[conversation_search],
3173
- outputs=[previous_conversations]
3174
- )
3175
-
3176
- load_conversations_btn.click(
3177
- clear_chat,
3178
- outputs=[chatbot, chat_history]
3179
- ).then(
3180
- load_conversation,
3181
- inputs=[previous_conversations],
3182
- outputs=[chatbot, conversation_id]
3183
- )
3184
-
3185
- previous_conversations.change(
3186
- load_conversation,
3187
- inputs=[previous_conversations],
3188
- outputs=[chat_history]
3189
- )
3190
-
3191
- def show_edit_message(evt: gr.SelectData, chat_history):
3192
- selected_id = json.dumps([evt.index])
3193
- return gr.update(value=chat_history[evt.index][0]), gr.update(value=selected_id)
3194
-
3195
- chatbot.select(
3196
- show_edit_message,
3197
- inputs=[chat_history],
3198
- outputs=[edit_message_text, selected_messages]
3199
- )
3200
-
3201
- def edit_selected_message(selected, edit_text, history):
3202
- try:
3203
- selected_ids = json.loads(selected) if selected else []
3204
- except json.JSONDecodeError:
3205
- print("Invalid JSON in selected messages")
3206
- return history
3207
-
3208
- if len(selected_ids) != 1:
3209
- print(f"Expected 1 selected message, got {len(selected_ids)}")
3210
- return history
3211
-
3212
- message_id = int(selected_ids[0])
3213
- if 0 <= message_id < len(history):
3214
- history[message_id] = (edit_text, history[message_id][1])
3215
- else:
3216
- print(f"Invalid message ID: {message_id}")
3217
- return history
3218
-
3219
- def delete_selected_messages(selected, history):
3220
- selected_ids = json.loads(selected)
3221
- selected_ids = [int(id) for id in selected_ids]
3222
- selected_ids.sort(reverse=True)
3223
- for message_id in selected_ids:
3224
- if 0 <= message_id < len(history):
3225
- del history[message_id]
3226
- return history, "" # Clear selected_messages
3227
-
3228
- update_message_button.click(
3229
- edit_selected_message,
3230
- inputs=[selected_messages, edit_message_text, chat_history],
3231
- outputs=[chatbot]
3232
- )
3233
-
3234
- delete_message_button.click(
3235
- delete_selected_messages,
3236
- inputs=[selected_messages, chat_history],
3237
- outputs=[chatbot, selected_messages]
3238
- )
3239
-
3240
- save_chat_history_as_file.click(
3241
- save_chat_history,
3242
- inputs=[chatbot, conversation_id],
3243
- outputs=[download_file]
3244
- )
3245
-
3246
- save_chat_history_to_db.click(
3247
- save_chat_history_to_db_wrapper,
3248
- inputs=[chatbot, conversation_id, media_content],
3249
- outputs=[conversation_id, gr.Textbox(label="Save Status")]
3250
- )
3251
-
3252
- #chatbot.select(show_edit_message, chat_history, [edit_message_text, gr.update()])
3253
-
3254
- return chatbot, chat_history, conversation_id
3255
-
3256
-
3257
  def create_chat_interface_stacked():
3258
  custom_css = """
3259
  .chatbot-container .message-wrap .message {
@@ -3288,12 +3436,8 @@ def create_chat_interface_stacked():
3288
  search_conversations_btn = gr.Button("Search Conversations")
3289
  load_conversations_btn = gr.Button("Load Selected Conversation")
3290
  with gr.Column():
3291
- api_endpoint = gr.Dropdown(label="Select API Endpoint",
3292
- choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "OpenRouter", "Mistral", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
3293
- value="Mistral")
3294
- api_key = gr.Textbox(label="API Key (if required)",
3295
- type="password",
3296
- value=MISTRAL_TOKEN)
3297
  preset_prompt = gr.Dropdown(label="Select Preset Prompt",
3298
  choices=load_preset_prompts(),
3299
  visible=True)
@@ -3313,6 +3457,7 @@ def create_chat_interface_stacked():
3313
  with gr.Row():
3314
  with gr.Column():
3315
  submit = gr.Button("Submit")
 
3316
 
3317
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
3318
  edit_message_text = gr.Textbox(label="Edit Message", visible=False)
@@ -3339,6 +3484,10 @@ def create_chat_interface_stacked():
3339
  gr.update(value=prompts["system_prompt"], visible=True)
3340
  )
3341
 
 
 
 
 
3342
  preset_prompt.change(
3343
  update_prompts,
3344
  inputs=preset_prompt,
@@ -3463,11 +3612,8 @@ def create_chat_interface_multi_api():
3463
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
3464
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
3465
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba",
3466
- "Tabbyapi", "VLLM", "HuggingFace"],
3467
- value="Mistral")
3468
- api_key = gr.Textbox(label=f"API Key {i + 1} (if required)",
3469
- type="password",
3470
- value=MISTRAL_TOKEN)
3471
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
3472
  chatbot = gr.Chatbot(height=800, elem_classes="chat-window")
3473
  chatbots.append(chatbot)
@@ -3478,6 +3624,13 @@ def create_chat_interface_multi_api():
3478
  with gr.Row():
3479
  msg = gr.Textbox(label="Enter your message", scale=4)
3480
  submit = gr.Button("Submit", scale=1)
 
 
 
 
 
 
 
3481
 
3482
  # State variables
3483
  chat_history = [gr.State([]) for _ in range(3)]
@@ -3577,9 +3730,8 @@ def create_chat_interface_four():
3577
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
3578
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
3579
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba",
3580
- "Tabbyapi", "VLLM", "HuggingFace"],
3581
- value="Mistral")
3582
- api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password", value=MISTRAL_TOKEN)
3583
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
3584
  chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
3585
  msg = gr.Textbox(label=f"Enter your message for Chat {i + 1}")
@@ -3643,88 +3795,7 @@ def chat_wrapper_single(message, chat_history, chatbot, api_endpoint, api_key, t
3643
  return new_msg, updated_chatbot, new_history, new_conv_id
3644
 
3645
 
3646
- def create_chat_interface_vertical():
3647
- with gr.TabItem("Remote LLM Chat (No Saving)"):
3648
- gr.Markdown("# Chat with a designated LLM Endpoint, using your selected item as starting context")
3649
- chat_history = gr.State([])
3650
- media_content = gr.State({})
3651
- selected_parts = gr.State([])
3652
- conversation_id = gr.State(None)
3653
-
3654
- with gr.Row():
3655
- with gr.Column(scale=1):
3656
- search_query_input = gr.Textbox(label="Search Query", placeholder="Enter your search query here...")
3657
- search_type_input = gr.Radio(choices=["Title", "URL", "Keyword", "Content"], value="Title", label="Search By")
3658
- search_button = gr.Button("Search")
3659
- items_output = gr.Dropdown(label="Select Item", choices=[], interactive=True)
3660
- item_mapping = gr.State({})
3661
- with gr.Row():
3662
- use_content = gr.Checkbox(label="Use Content")
3663
- use_summary = gr.Checkbox(label="Use Summary")
3664
- use_prompt = gr.Checkbox(label="Use Prompt")
3665
- save_conversation = gr.Checkbox(label="Save Conversation", value=False)
3666
- with gr.Row():
3667
- api_endpoint = gr.Dropdown(label="Select API Endpoint",
3668
- choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
3669
- value="Mistral")
3670
- with gr.Row():
3671
- api_key = gr.Textbox(label="API Key (if required)",
3672
- type="password",
3673
- value=MISTRAL_TOKEN)
3674
- with gr.Row():
3675
- temperature = gr.Slider(label="Temperature", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
3676
- with gr.Row():
3677
- preset_prompt = gr.Dropdown(label="Select Preset Prompt", choices=load_preset_prompts(), visible=True)
3678
- with gr.Row():
3679
- user_prompt = gr.Textbox(label="Modify Prompt", lines=3)
3680
-
3681
- with gr.Column():
3682
- gr.Markdown("#### Chat Window")
3683
- chatbot = gr.Chatbot(height=500)
3684
- msg = gr.Textbox(label="Enter your message")
3685
- submit = gr.Button("Submit")
3686
- save_button = gr.Button("Save Chat History")
3687
- download_file = gr.File(label="Download Chat History")
3688
-
3689
- save_button.click(save_chat_history, inputs=[chatbot], outputs=[download_file])
3690
-
3691
- search_button.click(
3692
- fn=update_dropdown,
3693
- inputs=[search_query_input, search_type_input],
3694
- outputs=[items_output, item_mapping]
3695
- )
3696
-
3697
- preset_prompt.change(update_user_prompt, inputs=preset_prompt, outputs=user_prompt)
3698
-
3699
- items_output.change(
3700
- update_chat_content,
3701
- inputs=[items_output, use_content, use_summary, use_prompt, item_mapping],
3702
- outputs=[media_content, selected_parts]
3703
- )
3704
-
3705
- for checkbox in [use_content, use_summary, use_prompt]:
3706
- checkbox.change(
3707
- update_selected_parts,
3708
- inputs=[use_content, use_summary, use_prompt],
3709
- outputs=[selected_parts]
3710
- )
3711
-
3712
- items_output.change(debug_output, inputs=[media_content, selected_parts], outputs=[])
3713
-
3714
- submit.click(
3715
- chat_wrapper_single,
3716
- inputs=[msg, chat_history, chatbot, api_endpoint, api_key, temperature, media_content, selected_parts, conversation_id, save_conversation, user_prompt],
3717
- outputs=[msg, chatbot, chat_history, conversation_id]
3718
- ).then(
3719
- lambda x: gr.update(value=""),
3720
- inputs=[chatbot],
3721
- outputs=[msg]
3722
- ).then(
3723
- lambda: gr.update(value=""),
3724
- outputs=[user_prompt]
3725
- )
3726
-
3727
-
3728
  def create_chat_management_tab():
3729
  with gr.TabItem("Chat Management"):
3730
  gr.Markdown("# Chat Management")
@@ -3736,33 +3807,151 @@ def create_chat_management_tab():
3736
  conversation_list = gr.Dropdown(label="Select Conversation", choices=[])
3737
  conversation_mapping = gr.State({})
3738
 
3739
- with gr.Row():
3740
- message_input = gr.Textbox(label="New Message")
3741
- send_button = gr.Button("Send")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3742
 
3743
- chat_display = gr.HTML(label="Chat Messages")
3744
 
3745
- edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
3746
- edit_message_text = gr.Textbox(label="Edit Message", visible=False)
3747
- update_message_button = gr.Button("Update Message", visible=False)
3748
 
3749
- delete_message_id = gr.Number(label="Message ID to Delete", visible=False)
3750
- delete_message_button = gr.Button("Delete Message", visible=False)
 
 
 
 
3751
 
3752
- def send_message(selected_conversation, message):
3753
- conversation_id = conversation_mapping.value.get(selected_conversation)
3754
- if conversation_id:
3755
- add_chat_message(conversation_id, "user", message)
3756
- return load_conversation(selected_conversation), ""
3757
- return "Please select a conversation first.", message
3758
 
3759
- def update_message(message_id, new_text, selected_conversation):
3760
- update_chat_message(message_id, new_text)
3761
- return load_conversation(selected_conversation), gr.update(value="", visible=False), gr.update(value="", visible=False), gr.update(visible=False)
 
 
 
 
 
 
3762
 
3763
- def delete_message(message_id, selected_conversation):
3764
- delete_chat_message(message_id)
3765
- return load_conversation(selected_conversation), gr.update(value="", visible=False), gr.update(visible=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3766
 
3767
  search_button.click(
3768
  search_conversations,
@@ -3771,26 +3960,18 @@ def create_chat_management_tab():
3771
  )
3772
 
3773
  conversation_list.change(
3774
- load_conversation,
3775
- inputs=[conversation_list],
3776
- outputs=[chat_display]
3777
  )
3778
- send_button.click(
3779
- send_message,
3780
- inputs=[conversation_list, message_input],
3781
- outputs=[chat_display, message_input]
3782
- )
3783
- update_message_button.click(
3784
- update_message,
3785
- inputs=[edit_message_id, edit_message_text, conversation_list],
3786
- outputs=[chat_display, edit_message_id, edit_message_text, update_message_button]
3787
- )
3788
- delete_message_button.click(
3789
- delete_message,
3790
- inputs=[delete_message_id, conversation_list],
3791
- outputs=[chat_display, delete_message_id, delete_message_button]
3792
  )
3793
 
 
3794
 
3795
  #
3796
  # End of Chat Interface Tab Functions
@@ -4240,7 +4421,7 @@ def import_obsidian_vault(vault_path, progress=gr.Progress()):
4240
  errors.append(error_msg)
4241
 
4242
  progress((i + 1) / total_files, f"Imported {imported_files} of {total_files} files")
4243
- time.sleep(0.1) # Small delay to prevent UI freezing
4244
 
4245
  return imported_files, total_files, errors
4246
  except Exception as e:
@@ -4363,13 +4544,10 @@ def create_import_item_tab():
4363
  auto_summarize_checkbox = gr.Checkbox(label="Auto-summarize", value=False)
4364
  api_name_input = gr.Dropdown(
4365
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
4366
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
4367
- label="API for Auto-summarization",
4368
- value="Mistral"
4369
  )
4370
- api_key_input = gr.Textbox(label="API Key",
4371
- type="password",
4372
- value=MISTRAL_TOKEN)
4373
  with gr.Row():
4374
  import_button = gr.Button("Import Data")
4375
  with gr.Row():
@@ -4700,19 +4878,16 @@ def create_import_book_tab():
4700
  auto_summarize_checkbox = gr.Checkbox(label="Auto-summarize", value=False)
4701
  api_name_input = gr.Dropdown(
4702
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
4703
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
4704
- label="API for Auto-summarization",
4705
- value="Mistral"
4706
  )
4707
- api_key_input = gr.Textbox(label="API Key",
4708
- type="password",
4709
- value=MISTRAL_TOKEN)
4710
  import_button = gr.Button("Import eBook")
4711
  with gr.Column():
4712
  with gr.Row():
4713
  import_output = gr.Textbox(label="Import Status")
4714
 
4715
- def import_epub(epub_file, title, author, keywords, custom_prompt, summary, auto_summarize, api_name, api_key):
4716
  try:
4717
  # Create a temporary directory to store the converted file
4718
  with tempfile.TemporaryDirectory() as temp_dir:
@@ -4730,8 +4905,8 @@ def create_import_book_tab():
4730
  content = md_file.read()
4731
 
4732
  # Now process the content as you would with a text file
4733
- return import_data(content, title, author, keywords, system_prompt_input,
4734
- custom_prompt_input, auto_summarize, api_name, api_key)
4735
  except Exception as e:
4736
  return f"Error processing EPUB: {str(e)}"
4737
 
@@ -5103,7 +5278,7 @@ def adjust_tone(text, concise, casual, api_name, api_key):
5103
 
5104
  prompt = f"Rewrite the following text to match these tones: {tone_prompt}. Text: {text}"
5105
  # Performing tone adjustment request...
5106
- adjusted_text = perform_summarization(api_name, text, prompt, api_key, system_prompt=None)
5107
 
5108
  return adjusted_text
5109
 
@@ -5127,7 +5302,23 @@ def create_document_editing_tab():
5127
  custom_prompt_checkbox = gr.Checkbox(label="Use Custom Prompt", value=False, visible=True)
5128
  system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Please analyze the provided text for grammar and style. Offer any suggestions or points to improve you can identify. Additionally please point out any misuses of any words or incorrect spellings.", lines=5, visible=False)
5129
  custom_prompt_input = gr.Textbox(label="user Prompt",
5130
- value="You are a professional summarizer. Please summarize this audio transcript.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5131
  lines=3,
5132
  visible=False)
5133
  custom_prompt_checkbox.change(
@@ -5137,13 +5328,12 @@ def create_document_editing_tab():
5137
  )
5138
  api_name_input = gr.Dropdown(
5139
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
5140
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
5141
- value="Mistral",
5142
  label="API for Grammar Check"
5143
  )
5144
  api_key_input = gr.Textbox(label="API Key (if not set in config.txt)", placeholder="Enter your API key here",
5145
- type="password",
5146
- value=MISTRAL_TOKEN)
5147
  check_grammar_button = gr.Button("Check Grammar and Style")
5148
 
5149
  with gr.Column():
@@ -5167,13 +5357,12 @@ def create_document_editing_tab():
5167
  casual_slider = gr.Slider(minimum=0, maximum=1, value=0.5, label="Casual vs Professional")
5168
  api_name_input = gr.Dropdown(
5169
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
5170
- "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "HuggingFace"],
5171
- value="Mistral",
5172
  label="API for Grammar Check"
5173
  )
5174
  api_key_input = gr.Textbox(label="API Key (if not set in config.txt)", placeholder="Enter your API key here",
5175
- type="password",
5176
- value=MISTRAL_TOKEN)
5177
  adjust_btn = gr.Button("Adjust Tone")
5178
 
5179
  with gr.Column():
@@ -5309,7 +5498,9 @@ def launch_ui(share_public=None, server_mode=False):
5309
  create_import_book_tab()
5310
  create_website_scraping_tab()
5311
  create_pdf_ingestion_tab()
 
5312
  create_resummary_tab()
 
5313
 
5314
  with gr.TabItem("Search / Detailed View"):
5315
  create_search_tab()
@@ -5319,8 +5510,6 @@ def launch_ui(share_public=None, server_mode=False):
5319
  create_prompt_view_tab()
5320
 
5321
  with gr.TabItem("Chat with an LLM"):
5322
- #create_chat_interface_vertical()
5323
- create_chat_interface_editable()
5324
  create_chat_interface()
5325
  create_chat_interface_stacked()
5326
  create_chat_interface_multi_api()
@@ -5329,11 +5518,12 @@ def launch_ui(share_public=None, server_mode=False):
5329
  create_llamafile_settings_tab()
5330
 
5331
  with gr.TabItem("Edit Existing Items"):
5332
- create_compare_transcripts_tab()
5333
  create_media_edit_tab()
5334
  create_media_edit_and_clone_tab()
5335
  create_prompt_edit_tab()
5336
  create_prompt_clone_tab()
 
 
5337
 
5338
  with gr.TabItem("Writing Tools"):
5339
  create_document_editing_tab()
 
23
  import tempfile
24
  import uuid
25
  import zipfile
26
+ from datetime import datetime
27
  import json
28
  import logging
29
  import os.path
30
  from pathlib import Path
31
  import sqlite3
32
+ from time import sleep
33
  from typing import Dict, List, Tuple, Optional
34
  import traceback
35
  from functools import wraps
36
 
 
37
  #
38
  # Import 3rd-Party Libraries
39
+ import pypandoc
40
  import yt_dlp
41
  import gradio as gr
42
  #
 
44
  from App_Function_Libraries.Article_Summarization_Lib import scrape_and_summarize_multiple
45
  from App_Function_Libraries.Audio_Files import process_audio_files, process_podcast, download_youtube_audio
46
  from App_Function_Libraries.Chunk_Lib import improved_chunking_process
47
+ from App_Function_Libraries.PDF_Ingestion_Lib import process_and_cleanup_pdf, extract_text_and_format_from_pdf, \
48
+ extract_metadata_from_pdf
49
  from App_Function_Libraries.Local_LLM_Inference_Engine_Lib import local_llm_gui_function
50
  from App_Function_Libraries.Local_Summarization_Lib import summarize_with_llama, summarize_with_kobold, \
51
+ summarize_with_oobabooga, summarize_with_tabbyapi, summarize_with_vllm, summarize_with_local_llm, \
52
+ summarize_with_ollama
53
  from App_Function_Libraries.Summarization_General_Lib import summarize_with_openai, summarize_with_cohere, \
54
  summarize_with_anthropic, summarize_with_groq, summarize_with_openrouter, summarize_with_deepseek, \
55
  summarize_with_huggingface, perform_summarization, save_transcription_and_summary, \
 
60
  delete_chat_message, update_chat_message, add_chat_message, get_chat_messages, search_chat_conversations, \
61
  create_chat_conversation, save_chat_history_to_database, view_database, get_transcripts, get_trashed_items, \
62
  user_delete_item, empty_trash, create_automated_backup, backup_dir, db_path, add_or_update_prompt, \
63
+ load_prompt_details, load_preset_prompts, insert_prompt_to_db, delete_prompt, search_and_display_items, \
64
+ get_conversation_name
65
  from App_Function_Libraries.Utils import sanitize_filename, extract_text_from_segments, create_download_directory, \
66
+ convert_to_seconds, load_comprehensive_config, safe_read_file, downloaded_files, generate_unique_identifier, \
67
+ generate_unique_filename
68
  from App_Function_Libraries.Video_DL_Ingestion_Lib import parse_and_expand_urls, \
69
  generate_timestamped_url, extract_metadata, download_video
70
 
 
73
  # Function Definitions
74
  #
75
 
 
 
76
  whisper_models = ["small", "medium", "small.en", "medium.en", "medium", "large", "large-v1", "large-v2", "large-v3",
77
  "distil-large-v2", "distil-medium.en", "distil-small.en"]
78
  custom_prompt_input = None
 
643
  visible=False)
644
  with gr.Row():
645
  system_prompt_input = gr.Textbox(label="System Prompt",
646
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
647
+ **Bulleted Note Creation Guidelines**
648
+
649
+ **Headings**:
650
+ - Based on referenced topics, not categories like quotes or terms
651
+ - Surrounded by **bold** formatting
652
+ - Not listed as bullet points
653
+ - No space between headings and list items underneath
654
+
655
+ **Emphasis**:
656
+ - **Important terms** set in bold font
657
+ - **Text ending in a colon**: also bolded
658
+
659
+ **Review**:
660
+ - Ensure adherence to specified format
661
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
662
+ """,
663
  lines=3,
664
  visible=False,
665
  interactive=True)
 
689
 
690
  api_name_input = gr.Dropdown(
691
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
692
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"],
693
+ value=None, label="API Name (Mandatory)")
694
+ api_key_input = gr.Textbox(label="API Key (Mandatory)", placeholder="Enter your API key here", type="password")
 
 
 
 
695
  keywords_input = gr.Textbox(label="Keywords", placeholder="Enter keywords here (comma-separated)",
696
  value="default,no_keyword_set")
697
  batch_size_input = gr.Slider(minimum=1, maximum=10, value=1, step=1,
 
1023
  if url_input:
1024
  inputs.extend([url.strip() for url in url_input.split('\n') if url.strip()])
1025
  if video_file is not None:
1026
+ # Assuming video_file is a file object with a 'name' attribute
1027
+ inputs.append(video_file.name)
1028
 
1029
  if not inputs:
1030
  raise ValueError("No input provided. Please enter URLs or upload a video file.")
 
1084
  # Handle URL or local file
1085
  if os.path.isfile(input_item):
1086
  video_file_path = input_item
1087
+ unique_id = generate_unique_identifier(input_item)
1088
  # Extract basic info from local file
1089
  info_dict = {
1090
+ 'webpage_url': unique_id,
1091
  'title': os.path.basename(input_item),
1092
  'description': "Local file",
1093
  'channel_url': None,
 
1308
  visible=False)
1309
  with gr.Row():
1310
  system_prompt_input = gr.Textbox(label="System Prompt",
1311
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1312
+ **Bulleted Note Creation Guidelines**
1313
+
1314
+ **Headings**:
1315
+ - Based on referenced topics, not categories like quotes or terms
1316
+ - Surrounded by **bold** formatting
1317
+ - Not listed as bullet points
1318
+ - No space between headings and list items underneath
1319
+
1320
+ **Emphasis**:
1321
+ - **Important terms** set in bold font
1322
+ - **Text ending in a colon**: also bolded
1323
+
1324
+ **Review**:
1325
+ - Ensure adherence to specified format
1326
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
1327
+ """,
1328
  lines=3,
1329
  visible=False)
1330
 
 
1354
 
1355
  api_name_input = gr.Dropdown(
1356
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1357
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
1358
+ value=None,
1359
  label="API for Summarization (Optional)"
1360
  )
1361
+ api_key_input = gr.Textbox(label="API Key (if required)", placeholder="Enter your API key here", type="password")
 
 
1362
  custom_keywords_input = gr.Textbox(label="Custom Keywords", placeholder="Enter custom keywords, comma-separated")
1363
  keep_original_input = gr.Checkbox(label="Keep original audio file", value=False)
1364
 
 
1432
  visible=False)
1433
  with gr.Row():
1434
  system_prompt_input = gr.Textbox(label="System Prompt",
1435
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1436
+ **Bulleted Note Creation Guidelines**
1437
+
1438
+ **Headings**:
1439
+ - Based on referenced topics, not categories like quotes or terms
1440
+ - Surrounded by **bold** formatting
1441
+ - Not listed as bullet points
1442
+ - No space between headings and list items underneath
1443
+
1444
+ **Emphasis**:
1445
+ - **Important terms** set in bold font
1446
+ - **Text ending in a colon**: also bolded
1447
+
1448
+ **Review**:
1449
+ - Ensure adherence to specified format
1450
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
1451
+ """,
1452
  lines=3,
1453
  visible=False)
1454
 
 
1478
 
1479
  podcast_api_name_input = gr.Dropdown(
1480
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp",
1481
+ "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
1482
+ value=None,
1483
  label="API Name for Summarization (Optional)"
1484
  )
1485
+ podcast_api_key_input = gr.Textbox(label="API Key (if required)", type="password")
 
 
1486
  podcast_whisper_model_input = gr.Dropdown(choices=whisper_models, value="medium", label="Whisper Model")
1487
 
1488
  keep_original_input = gr.Checkbox(label="Keep original audio file", value=False)
 
1570
  visible=False)
1571
  with gr.Row():
1572
  system_prompt_input = gr.Textbox(label="System Prompt",
1573
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1574
+ **Bulleted Note Creation Guidelines**
1575
+
1576
+ **Headings**:
1577
+ - Based on referenced topics, not categories like quotes or terms
1578
+ - Surrounded by **bold** formatting
1579
+ - Not listed as bullet points
1580
+ - No space between headings and list items underneath
1581
+
1582
+ **Emphasis**:
1583
+ - **Important terms** set in bold font
1584
+ - **Text ending in a colon**: also bolded
1585
+
1586
+ **Review**:
1587
+ - Ensure adherence to specified format
1588
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
1589
+ """,
1590
  lines=3,
1591
  visible=False)
1592
 
 
1616
 
1617
  api_name_input = gr.Dropdown(
1618
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1619
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"], value=None, label="API Name (Mandatory for Summarization)")
 
 
 
1620
  api_key_input = gr.Textbox(label="API Key (Mandatory if API Name is specified)",
1621
+ placeholder="Enter your API key here; Ignore if using Local API or Built-in API", type="password")
 
 
1622
  keywords_input = gr.Textbox(label="Keywords", placeholder="Enter keywords here (comma-separated)",
1623
  value="default,no_keyword_set", visible=True)
1624
 
 
1663
  visible=False)
1664
  with gr.Row():
1665
  system_prompt_input = gr.Textbox(label="System Prompt",
1666
+ value="""
1667
+ <s>You are a bulleted notes specialist.
1668
+ [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1669
+ **Bulleted Note Creation Guidelines**
1670
+
1671
+ **Headings**:
1672
+ - Based on referenced topics, not categories like quotes or terms
1673
+ - Surrounded by **bold** formatting
1674
+ - Not listed as bullet points
1675
+ - No space between headings and list items underneath
1676
+
1677
+ **Emphasis**:
1678
+ - **Important terms** set in bold font
1679
+ - **Text ending in a colon**: also bolded
1680
+
1681
+ **Review**:
1682
+ - Ensure adherence to specified format
1683
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]""",
1684
  lines=3,
1685
  visible=False)
1686
 
 
1719
  inputs=[pdf_file_input, pdf_title_input, pdf_author_input, pdf_keywords_input],
1720
  outputs=pdf_result_output
1721
  )
1722
+
1723
+
1724
+ def test_pdf_ingestion(pdf_file):
1725
+ if pdf_file is None:
1726
+ return "No file uploaded", ""
1727
+
1728
+ try:
1729
+ # Create a temporary directory
1730
+ with tempfile.TemporaryDirectory() as temp_dir:
1731
+ # Create a path for the temporary PDF file
1732
+ temp_path = os.path.join(temp_dir, "temp.pdf")
1733
+
1734
+ # Copy the contents of the uploaded file to the temporary file
1735
+ shutil.copy(pdf_file.name, temp_path)
1736
+
1737
+ # Extract text and convert to Markdown
1738
+ markdown_text = extract_text_and_format_from_pdf(temp_path)
1739
+
1740
+ # Extract metadata from PDF
1741
+ metadata = extract_metadata_from_pdf(temp_path)
1742
+
1743
+ # Use metadata for title and author if not provided
1744
+ title = metadata.get('title', os.path.splitext(os.path.basename(pdf_file.name))[0])
1745
+ author = metadata.get('author', 'Unknown')
1746
+
1747
+ result = f"PDF '{title}' by {author} processed successfully."
1748
+ return result, markdown_text
1749
+ except Exception as e:
1750
+ return f"Error ingesting PDF: {str(e)}", ""
1751
+
1752
+ def create_pdf_ingestion_test_tab():
1753
+ with gr.TabItem("Test PDF Ingestion"):
1754
+ with gr.Row():
1755
+ with gr.Column():
1756
+ pdf_file_input = gr.File(label="Upload PDF for testing")
1757
+ test_button = gr.Button("Test PDF Ingestion")
1758
+ with gr.Column():
1759
+ test_output = gr.Textbox(label="Test Result")
1760
+ pdf_content_output = gr.Textbox(label="PDF Content", lines=200)
1761
+ test_button.click(
1762
+ fn=test_pdf_ingestion,
1763
+ inputs=[pdf_file_input],
1764
+ outputs=[test_output, pdf_content_output]
1765
+ )
1766
+
1767
+
1768
  #
1769
  #
1770
  ################################################################################################################
 
1788
  with gr.Row():
1789
  api_name_input = gr.Dropdown(
1790
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
1791
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
1792
+ value="Local-LLM", label="API Name")
1793
+ api_key_input = gr.Textbox(label="API Key", placeholder="Enter your API key here", type="password")
 
 
 
 
 
1794
 
1795
  chunking_options_checkbox = gr.Checkbox(label="Use Chunking", value=False)
1796
  with gr.Row(visible=False) as chunking_options_box:
 
1817
  visible=False)
1818
  with gr.Row():
1819
  system_prompt_input = gr.Textbox(label="System Prompt",
1820
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1821
+ **Bulleted Note Creation Guidelines**
1822
+
1823
+ **Headings**:
1824
+ - Based on referenced topics, not categories like quotes or terms
1825
+ - Surrounded by **bold** formatting
1826
+ - Not listed as bullet points
1827
+ - No space between headings and list items underneath
1828
+
1829
+ **Emphasis**:
1830
+ - **Important terms** set in bold font
1831
+ - **Text ending in a colon**: also bolded
1832
+
1833
+ **Review**:
1834
+ - Ensure adherence to specified format
1835
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
1836
+ """,
1837
  lines=3,
1838
  visible=False)
1839
 
 
1904
 
1905
  item_options = [f"{item[1]} ({item[2]})" for item in results]
1906
  item_mapping = {f"{item[1]} ({item[2]})": item[0] for item in results}
1907
+ logging.debug(f"item_options: {item_options}")
1908
+ logging.debug(f"item_mapping: {item_mapping}")
1909
  return gr.update(choices=item_options), item_mapping
1910
 
1911
 
1912
+ def resummarize_content_wrapper(selected_item, item_mapping, api_name, api_key=None, chunking_options_checkbox=None, chunk_method=None,
1913
+ max_chunk_size=None, chunk_overlap=None, custom_prompt_checkbox=None, custom_prompt=None):
1914
+ logging.debug(f"resummarize_content_wrapper called with item_mapping type: {type(item_mapping)}")
1915
+ logging.debug(f"selected_item: {selected_item}")
1916
+
1917
+ if not selected_item or not api_name:
1918
  return "Please select an item and provide API details."
1919
 
1920
+ # Handle potential string representation of item_mapping
1921
+ if isinstance(item_mapping, str):
1922
+ try:
1923
+ item_mapping = json.loads(item_mapping)
1924
+ except json.JSONDecodeError:
1925
+ return f"Error: item_mapping is a string but not valid JSON. Value: {item_mapping[:100]}..."
1926
+
1927
+ if not isinstance(item_mapping, dict):
1928
+ return f"Error: item_mapping is not a dictionary or valid JSON string. Type: {type(item_mapping)}"
1929
+
1930
  media_id = item_mapping.get(selected_item)
1931
  if not media_id:
1932
+ return f"Invalid selection. Selected item: {selected_item}, Available items: {list(item_mapping.keys())[:5]}..."
1933
 
1934
  content, old_prompt, old_summary = fetch_item_details(media_id)
1935
 
 
1939
  # Prepare chunking options
1940
  chunk_options = {
1941
  'method': chunk_method,
1942
+ 'max_size': int(max_chunk_size) if max_chunk_size is not None else None,
1943
+ 'overlap': int(chunk_overlap) if chunk_overlap is not None else None,
1944
  'language': 'english',
1945
  'adaptive': True,
1946
  'multi_level': False,
 
1949
  # Prepare summarization prompt
1950
  summarization_prompt = custom_prompt if custom_prompt_checkbox and custom_prompt else None
1951
 
1952
+ logging.debug(f"Calling resummarize_content with media_id: {media_id}")
1953
+ # Call the resummarize_content function
1954
+ result = resummarize_content(selected_item, item_mapping, content, api_name, api_key, chunk_options, summarization_prompt)
1955
 
1956
  return result
1957
 
1958
 
1959
+ # FIXME - should be moved...
1960
+ def resummarize_content(selected_item, item_mapping, content, api_name, api_key=None, chunk_options=None, summarization_prompt=None):
1961
+ logging.debug(f"resummarize_content called with selected_item: {selected_item}")
 
 
 
 
 
 
 
 
 
 
1962
  # Load configuration
1963
  config = load_comprehensive_config()
1964
 
 
 
 
 
 
 
 
 
 
 
1965
  # Chunking logic
1966
+ if chunk_options:
1967
  chunks = improved_chunking_process(content, chunk_options)
1968
  else:
1969
  chunks = [{'text': content, 'metadata': {}}]
1970
 
1971
+ # Use default prompt if not provided
1972
+ if not summarization_prompt:
1973
+ summarization_prompt = config.get('Prompts', 'default_summary_prompt', fallback="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
1974
+ **Bulleted Note Creation Guidelines**
1975
+
1976
+ **Headings**:
1977
+ - Based on referenced topics, not categories like quotes or terms
1978
+ - Surrounded by **bold** formatting
1979
+ - Not listed as bullet points
1980
+ - No space between headings and list items underneath
1981
+
1982
+ **Emphasis**:
1983
+ - **Important terms** set in bold font
1984
+ - **Text ending in a colon**: also bolded
1985
+
1986
+ **Review**:
1987
+ - Ensure adherence to specified format
1988
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]""")
1989
 
1990
  # Summarization logic
1991
  summaries = []
 
2007
  new_summary = " ".join(summaries)
2008
 
2009
  # Update the database with the new summary
2010
+
2011
  try:
2012
  update_result = update_media_content(selected_item, item_mapping, content, summarization_prompt, new_summary)
2013
  if "successfully" in update_result.lower():
2014
+ return f"Re-summarization complete. New summary: {new_summary}..."
2015
  else:
2016
  return f"Error during database update: {update_result}"
2017
  except Exception as e:
2018
  logging.error(f"Error updating database: {str(e)}")
2019
  return f"Error updating database: {str(e)}"
2020
 
2021
+
2022
  # End of Re-Summarization Functions
2023
  #
2024
+ ############################################################################################################################################################################################################################
2025
+ #
2026
+ # Explain/Summarize This Tab
2027
+
2028
def create_summarize_explain_tab():
    """Build the "Explain/Summarize Text" Gradio tab.

    One-off summarization/explanation of pasted text; nothing is ingested
    into the media database.  Wires the button to summarize_explain_text.
    """
    with gr.TabItem("Explain/Summarize Text"):
        gr.Markdown("# Explain or Summarize Text without ingesting it into the DB")
        with gr.Row():
            with gr.Column():
                # Left column: user inputs.
                text_to_work_input = gr.Textbox(label="Text to be Explained or Summarized", placeholder="Enter the text you want explained or summarized here", lines=20)
                with gr.Row():
                    # Both modes default to on; each produces its own output box.
                    explanation_checkbox = gr.Checkbox(label="Explain Text", value=True)
                    summarization_checkbox = gr.Checkbox(label="Summarize Text", value=True)
                api_endpoint = gr.Dropdown(
                    choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
                             "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
                    value=None,
                    label="API for Summarization (Optional)"
                )
                api_key_input = gr.Textbox(label="API Key (if required)", placeholder="Enter your API key here", type="password")
                explain_summarize_button = gr.Button("Explain/Summarize")

            with gr.Column():
                # Right column: results (summary and explanation are independent).
                summarization_output = gr.Textbox(label="Summary:", lines=20)
                explanation_output = gr.Textbox(label="Explanation:", lines=50)

        # Note argument order: summarization checkbox precedes explanation
        # checkbox, matching summarize_explain_text's signature.
        explain_summarize_button.click(
            fn=summarize_explain_text,
            inputs=[text_to_work_input, api_endpoint, api_key_input, summarization_checkbox, explanation_checkbox],
            outputs=[summarization_output, explanation_output]
        )
2055
+
2056
+
2057
def _route_summarization_request(api_endpoint, api_key, input_data, user_prompt, temp, system_prompt):
    """Dispatch one summarization request to the backend named by *api_endpoint*.

    Args:
        api_endpoint: Backend name (case-insensitive), e.g. "OpenAI", "ollama".
        api_key: Key for hosted backends; local backends ignore or reuse it.
        input_data: The prompt payload ("User: ..." text).
        user_prompt: Per-request user prompt (a single space here).
        temp: Sampling temperature (ignored by backends that do not take it).
        system_prompt: System prompt selecting summary vs. explanation style.

    Returns:
        The backend's text response.

    Raises:
        ValueError: if *api_endpoint* names an unsupported backend.
        Exception: whatever the selected backend raises on failure.
    """
    endpoint = api_endpoint.lower()
    if endpoint == 'openai':
        return summarize_with_openai(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "anthropic":
        return summarize_with_anthropic(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "cohere":
        return summarize_with_cohere(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "groq":
        return summarize_with_groq(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "openrouter":
        return summarize_with_openrouter(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "deepseek":
        return summarize_with_deepseek(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "llama.cpp":
        return summarize_with_llama(input_data, user_prompt, temp, system_prompt)
    if endpoint == "kobold":
        return summarize_with_kobold(input_data, api_key, user_prompt, temp, system_prompt)
    if endpoint == "ooba":
        return summarize_with_oobabooga(input_data, api_key, user_prompt, temp, system_prompt)
    if endpoint == "tabbyapi":
        return summarize_with_tabbyapi(input_data, user_prompt, temp, system_prompt)
    if endpoint == "vllm":
        # NOTE: the vLLM wrapper takes no temperature parameter.
        return summarize_with_vllm(input_data, user_prompt, system_prompt)
    if endpoint == "local-llm":
        return summarize_with_local_llm(input_data, user_prompt, temp, system_prompt)
    if endpoint == "huggingface":
        return summarize_with_huggingface(api_key, input_data, user_prompt, temp, system_prompt)
    if endpoint == "ollama":
        return summarize_with_ollama(input_data, user_prompt, temp, system_prompt)
    raise ValueError(f"Unsupported API endpoint: {api_endpoint}")


def summarize_explain_text(message, api_endpoint, api_key, summarization, explanation):
    """Run one-off summarization and/or explanation of *message*.

    Args:
        message: Raw text supplied by the user.
        api_endpoint: Backend name from the dropdown; falsy when none chosen.
        api_key: Key for hosted backends (may be empty/None for local ones).
        summarization: Produce a bulleted-notes summary when truthy.
        explanation: Produce a layperson explanation when truthy.

    Returns:
        (summary_text, explanation_text) for the two Gradio output boxes.
        Errors are reported in-band as text rather than raised, so the UI
        always receives two strings.
    """
    temp = 0.7  # fixed sampling temperature for both requests
    logging.info(f"Debug - summarize_explain_text Function - Message: {message}")
    logging.info(f"Debug - summarize_explain_text Function - API Endpoint: {api_endpoint}")

    # Prepare the input for the API
    input_data = f"User: {message}\n"
    # Print first 500 chars
    logging.info(f"Debug - Chat Function - Input Data: {input_data[:500]}...")
    if api_key:
        # Bug fix: guard before slicing — api_key may be None/empty.
        logging.debug(f"Debug - Chat Function - API Key: {api_key[:10]}")
    user_prompt = " "

    if not api_endpoint:
        return "Please select an API endpoint", "Please select an API endpoint"

    # --- Summarization pass (optional) ---
    response1 = "Summary: No summary requested"
    if summarization:
        system_prompt = """<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
**Bulleted Note Creation Guidelines**

**Headings**:
- Based on referenced topics, not categories like quotes or terms
- Surrounded by **bold** formatting
- Not listed as bullet points
- No space between headings and list items underneath

**Emphasis**:
- **Important terms** set in bold font
- **Text ending in a colon**: also bolded

**Review**:
- Ensure adherence to specified format
- Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]"""
        try:
            logging.info(f"Debug - Chat Function - API Endpoint: {api_endpoint}")
            summarization_response = _route_summarization_request(
                api_endpoint, api_key, input_data, user_prompt, temp, system_prompt)
            response1 = f"Summary: {summarization_response}"
        except Exception as e:
            # Bug fix: previously the error text was computed here and then
            # unconditionally overwritten by the "No summary requested"
            # branch; now the error is surfaced to the UI.
            logging.error(f"Error in summarization: {str(e)}")
            response1 = f"An error occurred during summarization: {str(e)}"

    # --- Explanation pass (optional) ---
    response2 = "Explanation: No explanation requested"
    if explanation:
        system_prompt = """You are a professional teacher. Please explain the content presented in an easy to digest fashion so that a non-specialist may understand it."""
        try:
            logging.info(f"Debug - Chat Function - API Endpoint: {api_endpoint}")
            explanation_response = _route_summarization_request(
                api_endpoint, api_key, input_data, user_prompt, temp, system_prompt)
            response2 = f"Explanation: {explanation_response}"
        except Exception as e:
            logging.error(f"Error in summarization: {str(e)}")
            response2 = f"An error occurred during summarization: {str(e)}"

    # Bug fix: the old outer `except` returned a single string, but the
    # Gradio wiring expects exactly two outputs; this path always returns
    # a 2-tuple.
    return response1, response2
2182
+
2183
+
2184
  ############################################################################################################################################################################################################################
2185
  #
2186
  # Transcript Comparison Tab
 
2265
 
2266
  ### End of under construction section
2267
 
 
 
 
2268
  #
2269
  #
2270
  ###########################################################################################################################################################################################################################
 
2773
  if current_dir_model:
2774
  return current_dir_model
2775
  elif parent_dir_model:
2776
+ return os.path.join("..", parent_dir_model)
2777
  else:
2778
  return ""
2779
 
 
2829
  # Chat Interface Tab Functions
2830
 
2831
 
 
 
 
2832
  def chat(message, history, media_content, selected_parts, api_endpoint, api_key, prompt, temperature,
2833
  system_message=None):
2834
  try:
 
2851
 
2852
  # Prepare the input for the API
2853
  if not history:
2854
+ input_data = f"{combined_content}\n\nUser: {message}\n"
2855
  else:
2856
+ input_data = f"User: {message}\n"
2857
  # Print first 500 chars
2858
  logging.info(f"Debug - Chat Function - Input Data: {input_data[:500]}...")
2859
 
 
2890
  elif api_endpoint.lower() == "tabbyapi":
2891
  response = summarize_with_tabbyapi(input_data, prompt, temp, system_message)
2892
  elif api_endpoint.lower() == "vllm":
2893
+ response = summarize_with_vllm(input_data, prompt, system_message)
2894
  elif api_endpoint.lower() == "local-llm":
2895
  response = summarize_with_local_llm(input_data, prompt, temp, system_message)
2896
  elif api_endpoint.lower() == "huggingface":
2897
  response = summarize_with_huggingface(api_key, input_data, prompt, temp, system_message)
2898
+ elif api_endpoint.lower() == "ollama":
2899
+ response = summarize_with_ollama(input_data, prompt, temp, system_message)
2900
  else:
2901
  raise ValueError(f"Unsupported API endpoint: {api_endpoint}")
2902
 
 
2908
 
2909
 
2910
def save_chat_history_to_db_wrapper(chatbot, conversation_id, media_content):
    """Persist the chat transcript to the database.

    Derives a media id (webpage_url) and media name (title) from
    *media_content* when it has the expected {'content': <json or dict>}
    shape, falling back to placeholders otherwise, then stores the history
    under a timestamped conversation name.

    Returns:
        (new_conversation_id, status_message) on success, or the original
        (conversation_id, error_message) on failure.
    """
    logging.info(f"Attempting to save chat history. Media content type: {type(media_content)}")
    try:
        def _parse_media_info(raw):
            # Interpret the 'content' payload; returns (media_id, media_name),
            # either of which may be None when extraction fails.
            try:
                if isinstance(raw, str):
                    parsed = json.loads(raw)
                elif isinstance(raw, dict):
                    parsed = raw
                else:
                    raise ValueError(f"Unexpected content type: {type(raw)}")
                media_id = parsed.get('webpage_url')
                media_name = parsed.get('title')
                logging.info(f"Extracted media_id: {media_id}, media_name: {media_name}")
                return media_id, media_name
            except json.JSONDecodeError:
                logging.error("Failed to decode JSON from media_content['content']")
            except Exception as e:
                logging.error(f"Error processing media_content: {str(e)}")
            return None, None

        media_id, media_name = None, None
        if isinstance(media_content, dict):
            logging.debug(f"Media content keys: {media_content.keys()}")
            if 'content' in media_content:
                media_id, media_name = _parse_media_info(media_content['content'])
            else:
                logging.warning("'content' key not found in media_content")
        else:
            logging.warning(f"media_content is not a dictionary. Type: {type(media_content)}")

        # Substitute placeholders so saving never fails on missing metadata.
        if media_id is None:
            media_id = "unknown_media"
            logging.warning(f"Unable to extract media_id from media_content. Using placeholder: {media_id}")
        if media_name is None:
            media_name = "Unnamed Media"
            logging.warning(f"Unable to extract media_name from media_content. Using placeholder: {media_name}")

        # Unique conversation name: media id + current timestamp.
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        conversation_name = f"Chat_{media_id}_{stamp}"

        new_conversation_id = save_chat_history_to_database(
            chatbot, conversation_id, media_id, media_name, conversation_name)
        return new_conversation_id, f"Chat history saved successfully as {conversation_name}!"
    except Exception as e:
        error_message = f"Failed to save chat history: {str(e)}"
        logging.error(error_message, exc_info=True)
        return conversation_id, error_message
2962
 
2963
 
2964
def save_chat_history(history, conversation_id, media_content):
    """Write the chat history to a uniquely-named JSON file on disk.

    Returns:
        The path of the written file, or None if anything goes wrong.
    """
    try:
        json_text, conversation_name = generate_chat_history_content(
            history, conversation_id, media_content)

        # Filesystem-safe base name: <sanitized-name>_<timestamp>.json
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        sanitized = re.sub(r'[^a-zA-Z0-9_-]', '_', conversation_name)
        base_filename = f"{sanitized}_{stamp}.json"

        # Write to a temp file first, then rename to the final unique name
        # within the same directory (rename is atomic on the same filesystem).
        with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json') as handle:
            handle.write(json_text)
            tmp_path = handle.name

        target_dir = os.path.dirname(tmp_path)
        final_name = generate_unique_filename(target_dir, base_filename)
        final_path = os.path.join(target_dir, final_name)
        os.rename(tmp_path, final_path)

        return final_path
    except Exception as e:
        logging.error(f"Error saving chat history: {str(e)}")
        return None
2988
+
2989
+
2990
+ def generate_chat_history_content(history, conversation_id, media_content):
2991
  timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
2992
+
2993
+ conversation_name = get_conversation_name(conversation_id)
2994
+
2995
+ if not conversation_name:
2996
+ media_name = extract_media_name(media_content)
2997
+ if media_name:
2998
+ conversation_name = f"{media_name}-chat"
2999
+ else:
3000
+ conversation_name = f"chat-{timestamp}" # Fallback name
3001
 
3002
  chat_data = {
3003
  "conversation_id": conversation_id,
3004
+ "conversation_name": conversation_name,
3005
  "timestamp": timestamp,
3006
  "history": [
3007
  {
 
3012
  ]
3013
  }
3014
 
3015
+ return json.dumps(chat_data, indent=2), conversation_name
3016
 
 
 
 
 
3017
 
3018
def extract_media_name(media_content):
    """Best-effort lookup of a human-readable media title.

    Accepts a dict whose 'content' entry is either a dict or a JSON string
    and returns its 'title' (or, failing that, 'name') field.

    Returns:
        The extracted title/name string, or None when the payload does not
        match the expected shape.
    """
    if not isinstance(media_content, dict):
        logging.warning(f"Unexpected media_content format: {type(media_content)}")
        return None

    payload = media_content.get('content', {})
    if isinstance(payload, str):
        # 'content' may arrive serialized; decode it before field lookup.
        try:
            payload = json.loads(payload)
        except json.JSONDecodeError:
            logging.warning("Failed to parse media_content JSON string")
            return None

    if isinstance(payload, dict):
        # Prefer 'title'; fall back to 'name' when title is missing/falsy.
        return payload.get('title') or payload.get('name')

    logging.warning(f"Unexpected media_content format: {type(media_content)}")
    return None
3034
 
3035
  def show_edit_message(selected):
3036
  if selected:
 
3114
  return {"title": "", "details": "", "system_prompt": "", "user_prompt": ""}
3115
 
3116
 
3117
def clear_chat():
    """Reset the chat tab: empty the chatbot widget and drop the active conversation id."""
    emptied_chatbot = gr.update(value=[])
    return emptied_chatbot, None
3120
+
3121
+
3122
  # FIXME - add additional features....
3123
  def chat_wrapper(message, history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, conversation_id, save_conversation, temperature, system_prompt, max_tokens=None, top_p=None, frequency_penalty=None, presence_penalty=None, stop_sequence=None):
3124
  try:
 
3188
  return history, conversation_id
3189
 
3190
 
 
 
 
 
3191
  def update_message_in_chat(message_id, new_text, history):
3192
  update_chat_message(message_id, new_text)
3193
  updated_history = [(msg1, msg2) if msg1[1] != message_id and msg2[1] != message_id
 
3238
  with gr.Row():
3239
  load_conversations_btn = gr.Button("Load Selected Conversation")
3240
 
3241
+ api_endpoint = gr.Dropdown(label="Select API Endpoint", choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
3242
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"])
3243
+ api_key = gr.Textbox(label="API Key (if required)", type="password")
 
 
 
 
3244
  custom_prompt_checkbox = gr.Checkbox(label="Use a Custom Prompt",
3245
  value=False,
3246
  visible=True)
 
3262
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
3263
  msg = gr.Textbox(label="Enter your message")
3264
  submit = gr.Button("Submit")
3265
+ clear_chat_button = gr.Button("Clear Chat")
3266
 
3267
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
3268
  edit_message_text = gr.Textbox(label="Edit Message", visible=False)
 
3274
  save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
3275
  save_chat_history_as_file = gr.Button("Save Chat History as File")
3276
  download_file = gr.File(label="Download Chat History")
3277
+ save_status = gr.Textbox(label="Save Status", interactive=False)
3278
 
3279
  # Restore original functionality
3280
  search_button.click(
 
3283
  outputs=[items_output, item_mapping]
3284
  )
3285
 
3286
+ def save_chat_wrapper(history, conversation_id, media_content):
3287
+ file_path = save_chat_history(history, conversation_id, media_content)
3288
+ if file_path:
3289
+ return file_path, f"Chat history saved successfully as {os.path.basename(file_path)}!"
3290
+ else:
3291
+ return None, "Error saving chat history. Please check the logs and try again."
3292
+
3293
+ save_chat_history_as_file.click(
3294
+ save_chat_wrapper,
3295
+ inputs=[chatbot, conversation_id, media_content],
3296
+ outputs=[download_file, save_status]
3297
+ )
3298
+
3299
  def update_prompts(preset_name):
3300
  prompts = update_user_prompt(preset_name)
3301
  return (
 
3303
  gr.update(value=prompts["system_prompt"], visible=True)
3304
  )
3305
 
3306
+ def clear_chat():
3307
+ return [], None # Return empty list for chatbot and None for conversation_id
3308
+
3309
+ clear_chat_button.click(
3310
+ clear_chat,
3311
+ outputs=[chatbot, conversation_id]
3312
+ )
3313
  preset_prompt.change(
3314
  update_prompts,
3315
  inputs=preset_prompt,
 
3336
  inputs=[chatbot],
3337
  outputs=[msg]
3338
  ).then(# Clear the user prompt after the first message
3339
+ lambda: (gr.update(value=""), gr.update(value="")),
3340
  outputs=[user_prompt, system_prompt_input]
3341
  )
3342
 
 
3402
  chatbot.select(show_delete_message, None, [delete_message_id, delete_message_button])
3403
 
3404
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3405
  def create_chat_interface_stacked():
3406
  custom_css = """
3407
  .chatbot-container .message-wrap .message {
 
3436
  search_conversations_btn = gr.Button("Search Conversations")
3437
  load_conversations_btn = gr.Button("Load Selected Conversation")
3438
  with gr.Column():
3439
+ api_endpoint = gr.Dropdown(label="Select API Endpoint", choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "OpenRouter", "Mistral", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"])
3440
+ api_key = gr.Textbox(label="API Key (if required)", type="password")
 
 
 
 
3441
  preset_prompt = gr.Dropdown(label="Select Preset Prompt",
3442
  choices=load_preset_prompts(),
3443
  visible=True)
 
3457
  with gr.Row():
3458
  with gr.Column():
3459
  submit = gr.Button("Submit")
3460
+ clear_chat_button = gr.Button("Clear Chat")
3461
 
3462
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
3463
  edit_message_text = gr.Textbox(label="Edit Message", visible=False)
 
3484
  gr.update(value=prompts["system_prompt"], visible=True)
3485
  )
3486
 
3487
+ clear_chat_button.click(
3488
+ clear_chat,
3489
+ outputs=[chatbot, conversation_id]
3490
+ )
3491
  preset_prompt.change(
3492
  update_prompts,
3493
  inputs=preset_prompt,
 
3612
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
3613
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
3614
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba",
3615
+ "Tabbyapi", "VLLM","ollama", "HuggingFace"])
3616
+ api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password")
 
 
 
3617
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
3618
  chatbot = gr.Chatbot(height=800, elem_classes="chat-window")
3619
  chatbots.append(chatbot)
 
3624
  with gr.Row():
3625
  msg = gr.Textbox(label="Enter your message", scale=4)
3626
  submit = gr.Button("Submit", scale=1)
3627
+ # FIXME - clear chat
3628
+ # clear_chat_button = gr.Button("Clear Chat")
3629
+ #
3630
+ # clear_chat_button.click(
3631
+ # clear_chat,
3632
+ # outputs=[chatbot]
3633
+ # )
3634
 
3635
  # State variables
3636
  chat_history = [gr.State([]) for _ in range(3)]
 
3730
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
3731
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
3732
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold", "Ooba",
3733
+ "Tabbyapi", "VLLM","ollama", "HuggingFace"])
3734
+ api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password")
 
3735
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.1, value=0.7)
3736
  chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
3737
  msg = gr.Textbox(label=f"Enter your message for Chat {i + 1}")
 
3795
  return new_msg, updated_chatbot, new_history, new_conv_id
3796
 
3797
 
3798
+ # FIXME - Finish implementing functions + testing/valdidation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3799
  def create_chat_management_tab():
3800
  with gr.TabItem("Chat Management"):
3801
  gr.Markdown("# Chat Management")
 
3807
  conversation_list = gr.Dropdown(label="Select Conversation", choices=[])
3808
  conversation_mapping = gr.State({})
3809
 
3810
+ with gr.Tabs():
3811
+ with gr.TabItem("Edit"):
3812
+ chat_content = gr.TextArea(label="Chat Content (JSON)", lines=20, max_lines=50)
3813
+ save_button = gr.Button("Save Changes")
3814
+
3815
+ with gr.TabItem("Preview"):
3816
+ chat_preview = gr.HTML(label="Chat Preview")
3817
+ result_message = gr.Markdown("")
3818
+
3819
def search_conversations(query):
    # Helper used inside create_chat_management_tab: search stored
    # conversations and refresh the dropdown choices plus the label->id map.
    conversations = search_chat_conversations(query)
    labels = [
        f"{conv['conversation_name']} (Media: {conv['media_title']}, ID: {conv['id']})"
        for conv in conversations
    ]
    mapping = dict(zip(labels, (conv['id'] for conv in conversations)))
    return gr.update(choices=labels), mapping
3825
+
3826
def load_conversations(selected, conversation_mapping):
    # Helper used inside create_chat_management_tab: returns
    # (pretty-printed JSON, HTML preview) for the chosen conversation.
    logging.info(f"Selected: {selected}")
    logging.info(f"Conversation mapping: {conversation_mapping}")

    try:
        if not selected or selected not in conversation_mapping:
            logging.warning("No conversation selected or not in mapping")
            return "", "<p>No conversation selected</p>"

        conversation_id = conversation_mapping[selected]
        messages = get_chat_messages(conversation_id)
        json_content = json.dumps(
            {"conversation_id": conversation_id, "messages": messages},
            indent=2,
        )

        # Render each message as a colored bubble; user messages get the
        # light-blue background, everything else the grey one.
        pieces = ["<div style='max-height: 500px; overflow-y: auto;'>"]
        for msg in messages:
            sender_style = ("background-color: #e6f3ff;"
                            if msg['sender'] == 'user'
                            else "background-color: #f0f0f0;")
            pieces.append(
                f"<div style='margin-bottom: 10px; padding: 10px; border-radius: 5px; {sender_style}'>")
            pieces.append(
                f"<strong>{msg['sender']}:</strong> {html.escape(msg['message'])}<br>")
            pieces.append(f"<small>Timestamp: {msg['timestamp']}</small>")
            pieces.append("</div>")
        pieces.append("</div>")
        html_preview = "".join(pieces)

        logging.info("Returning json_content and html_preview")
        return json_content, html_preview
    except Exception as e:
        logging.error(f"Error in load_conversations: {str(e)}")
        return f"Error: {str(e)}", "<p>Error loading conversation</p>"
3859
+
3860
def validate_conversation_json(content):
    """Validate an edited conversation document before saving.

    Parameters:
        content: raw JSON string from the editor text area.

    Returns:
        (True, parsed_dict) when the document is structurally valid,
        otherwise (False, error_message).
    """
    try:
        data = json.loads(content)
    except json.JSONDecodeError as e:
        return False, f"Invalid JSON: {str(e)}"

    if not isinstance(data, dict):
        return False, "Invalid JSON structure: root should be an object"
    if "conversation_id" not in data or not isinstance(data["conversation_id"], int):
        return False, "Missing or invalid conversation_id"
    if "messages" not in data or not isinstance(data["messages"], list):
        return False, "Missing or invalid messages array"
    for msg in data["messages"]:
        # Each message must be an object. Previously a non-dict entry either
        # crashed (`"sender" in 42` -> TypeError) or, for a string, passed a
        # misleading substring test; reject it explicitly instead.
        if not isinstance(msg, dict) or not all(key in msg for key in ("sender", "message")):
            return False, "Invalid message structure: missing required fields"
    return True, data
3875
+
3876
def save_conversation(selected, conversation_mapping, content):
    """Persist edited conversation JSON back to the ChatMessages table.

    Validates *content*, replaces the conversation's messages inside a
    single transaction, and returns a (status_message, html_preview) pair
    for the UI. Never raises; all errors are reported via the status text.
    """
    if not selected or selected not in conversation_mapping:
        return "Please select a conversation before saving.", "<p>No changes made</p>"

    conversation_id = conversation_mapping[selected]
    is_valid, result = validate_conversation_json(content)

    if not is_valid:
        return f"Error: {result}", "<p>No changes made due to error</p>"

    conversation_data = result
    # Refuse edits whose embedded id differs from the selected conversation.
    if conversation_data["conversation_id"] != conversation_id:
        return "Error: Conversation ID mismatch.", "<p>No changes made due to ID mismatch</p>"

    try:
        with db.get_connection() as conn:
            cursor = conn.cursor()
            try:
                conn.execute("BEGIN TRANSACTION")

                # Backup original conversation before the destructive replace.
                cursor.execute("SELECT * FROM ChatMessages WHERE conversation_id = ?", (conversation_id,))
                original_messages = cursor.fetchall()
                backup_data = json.dumps({"conversation_id": conversation_id, "messages": original_messages})
                # TODO: persist backup_data somewhere durable so edits are recoverable.
                logging.debug("Backup for conversation %s is %d bytes", conversation_id, len(backup_data))

                # Replace all messages for this conversation.
                cursor.execute("DELETE FROM ChatMessages WHERE conversation_id = ?", (conversation_id,))
                for message in conversation_data["messages"]:
                    cursor.execute('''
                        INSERT INTO ChatMessages (conversation_id, sender, message, timestamp)
                        VALUES (?, ?, ?, COALESCE(?, CURRENT_TIMESTAMP))
                    ''', (conversation_id, message["sender"], message["message"], message.get("timestamp")))

                conn.commit()
            except Exception:
                # Roll back while the connection is still open and in scope.
                # Previously rollback ran after the `with` block, where `conn`
                # could be unbound (get_connection failed) or already closed.
                conn.rollback()
                raise

        # Create updated HTML preview
        html_preview = "<div style='max-height: 500px; overflow-y: auto;'>"
        for msg in conversation_data["messages"]:
            sender_style = "background-color: #e6f3ff;" if msg['sender'] == 'user' else "background-color: #f0f0f0;"
            html_preview += f"<div style='margin-bottom: 10px; padding: 10px; border-radius: 5px; {sender_style}'>"
            html_preview += f"<strong>{msg['sender']}:</strong> {html.escape(msg['message'])}<br>"
            html_preview += f"<small>Timestamp: {msg.get('timestamp', 'N/A')}</small>"
            html_preview += "</div>"
        html_preview += "</div>"

        return "Conversation updated successfully.", html_preview
    except sqlite3.Error as e:
        logging.error(f"Database error in save_conversation: {e}")
        return f"Error updating conversation: {str(e)}", "<p>Error occurred while saving</p>"
    except Exception as e:
        logging.error(f"Unexpected error in save_conversation: {e}")
        return f"Unexpected error: {str(e)}", "<p>Unexpected error occurred</p>"
3933
+
3934
def parse_formatted_content(formatted_content):
    """Convert the plain-text export format back into conversation JSON.

    Expects line 0 to carry the conversation id ("Conversation ID: N"),
    line 1 the timestamp, and the body (from line 3 onward) to alternate
    "Role: ..." / "Content: ..." lines. Returns a pretty-printed JSON
    string with conversation_id, timestamp, and a history list.
    """
    text_lines = formatted_content.split('\n')
    conversation_id = int(text_lines[0].split(': ')[1])
    timestamp = text_lines[1].split(': ')[1]

    history = []
    pending_role = None
    pending_content = None

    def flush():
        # Emit the message accumulated so far, if any role has been seen.
        if pending_role is not None:
            history.append({"role": pending_role, "content": ["", pending_content]})

    for line in text_lines[3:]:
        if line.startswith("Role: "):
            flush()
            pending_role = line.split(': ')[1]
        elif line.startswith("Content: "):
            pending_content = line.split(': ', 1)[1]
    flush()

    return json.dumps(
        {
            "conversation_id": conversation_id,
            "timestamp": timestamp,
            "history": history,
        },
        indent=2,
    )
3955
 
3956
  search_button.click(
3957
  search_conversations,
 
3960
  )
3961
 
3962
  conversation_list.change(
3963
+ load_conversations,
3964
+ inputs=[conversation_list, conversation_mapping],
3965
+ outputs=[chat_content, chat_preview]
3966
  )
3967
+
3968
+ save_button.click(
3969
+ save_conversation,
3970
+ inputs=[conversation_list, conversation_mapping, chat_content],
3971
+ outputs=[result_message, chat_preview]
 
 
 
 
 
 
 
 
 
3972
  )
3973
 
3974
+ return search_query, search_button, conversation_list, conversation_mapping, chat_content, save_button, result_message, chat_preview
3975
 
3976
  #
3977
  # End of Chat Interface Tab Functions
 
4421
  errors.append(error_msg)
4422
 
4423
  progress((i + 1) / total_files, f"Imported {imported_files} of {total_files} files")
4424
+ sleep(0.1) # Small delay to prevent UI freezing
4425
 
4426
  return imported_files, total_files, errors
4427
  except Exception as e:
 
4544
  auto_summarize_checkbox = gr.Checkbox(label="Auto-summarize", value=False)
4545
  api_name_input = gr.Dropdown(
4546
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
4547
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
4548
+ label="API for Auto-summarization"
 
4549
  )
4550
+ api_key_input = gr.Textbox(label="API Key", type="password")
 
 
4551
  with gr.Row():
4552
  import_button = gr.Button("Import Data")
4553
  with gr.Row():
 
4878
  auto_summarize_checkbox = gr.Checkbox(label="Auto-summarize", value=False)
4879
  api_name_input = gr.Dropdown(
4880
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
4881
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
4882
+ label="API for Auto-summarization"
 
4883
  )
4884
+ api_key_input = gr.Textbox(label="API Key", type="password")
 
 
4885
  import_button = gr.Button("Import eBook")
4886
  with gr.Column():
4887
  with gr.Row():
4888
  import_output = gr.Textbox(label="Import Status")
4889
 
4890
+ def import_epub(epub_file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key):
4891
  try:
4892
  # Create a temporary directory to store the converted file
4893
  with tempfile.TemporaryDirectory() as temp_dir:
 
4905
  content = md_file.read()
4906
 
4907
  # Now process the content as you would with a text file
4908
+ return import_data(content, title, author, keywords, system_prompt,
4909
+ user_prompt, auto_summarize, api_name, api_key)
4910
  except Exception as e:
4911
  return f"Error processing EPUB: {str(e)}"
4912
 
 
5278
 
5279
  prompt = f"Rewrite the following text to match these tones: {tone_prompt}. Text: {text}"
5280
  # Performing tone adjustment request...
5281
+ adjusted_text = perform_summarization(api_name, text, prompt, api_key)
5282
 
5283
  return adjusted_text
5284
 
 
5302
  custom_prompt_checkbox = gr.Checkbox(label="Use Custom Prompt", value=False, visible=True)
5303
  system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Please analyze the provided text for grammar and style. Offer any suggestions or points to improve you can identify. Additionally please point out any misuses of any words or incorrect spellings.", lines=5, visible=False)
5304
  custom_prompt_input = gr.Textbox(label="user Prompt",
5305
+ value="""<s>You are a bulleted notes specialist. [INST]```When creating comprehensive bulleted notes, you should follow these guidelines: Use multiple headings based on the referenced topics, not categories like quotes or terms. Headings should be surrounded by bold formatting and not be listed as bullet points themselves. Leave no space between headings and their corresponding list items underneath. Important terms within the content should be emphasized by setting them in bold font. Any text that ends with a colon should also be bolded. Before submitting your response, review the instructions, and make any corrections necessary to adhered to the specified format. Do not reference these instructions within the notes.``` \nBased on the content between backticks create comprehensive bulleted notes.[/INST]
5306
+ **Bulleted Note Creation Guidelines**
5307
+
5308
+ **Headings**:
5309
+ - Based on referenced topics, not categories like quotes or terms
5310
+ - Surrounded by **bold** formatting
5311
+ - Not listed as bullet points
5312
+ - No space between headings and list items underneath
5313
+
5314
+ **Emphasis**:
5315
+ - **Important terms** set in bold font
5316
+ - **Text ending in a colon**: also bolded
5317
+
5318
+ **Review**:
5319
+ - Ensure adherence to specified format
5320
+ - Do not reference these instructions in your response.</s>[INST] {{ .Prompt }} [/INST]
5321
+ """,
5322
  lines=3,
5323
  visible=False)
5324
  custom_prompt_checkbox.change(
 
5328
  )
5329
  api_name_input = gr.Dropdown(
5330
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
5331
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
5332
+ value=None,
5333
  label="API for Grammar Check"
5334
  )
5335
  api_key_input = gr.Textbox(label="API Key (if not set in config.txt)", placeholder="Enter your API key here",
5336
+ type="password")
 
5337
  check_grammar_button = gr.Button("Check Grammar and Style")
5338
 
5339
  with gr.Column():
 
5357
  casual_slider = gr.Slider(minimum=0, maximum=1, value=0.5, label="Casual vs Professional")
5358
  api_name_input = gr.Dropdown(
5359
  choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
5360
+ "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM","ollama", "HuggingFace"],
5361
+ value=None,
5362
  label="API for Grammar Check"
5363
  )
5364
  api_key_input = gr.Textbox(label="API Key (if not set in config.txt)", placeholder="Enter your API key here",
5365
+ type="password")
 
5366
  adjust_btn = gr.Button("Adjust Tone")
5367
 
5368
  with gr.Column():
 
5498
  create_import_book_tab()
5499
  create_website_scraping_tab()
5500
  create_pdf_ingestion_tab()
5501
+ create_pdf_ingestion_test_tab()
5502
  create_resummary_tab()
5503
+ create_summarize_explain_tab()
5504
 
5505
  with gr.TabItem("Search / Detailed View"):
5506
  create_search_tab()
 
5510
  create_prompt_view_tab()
5511
 
5512
  with gr.TabItem("Chat with an LLM"):
 
 
5513
  create_chat_interface()
5514
  create_chat_interface_stacked()
5515
  create_chat_interface_multi_api()
 
5518
  create_llamafile_settings_tab()
5519
 
5520
  with gr.TabItem("Edit Existing Items"):
 
5521
  create_media_edit_tab()
5522
  create_media_edit_and_clone_tab()
5523
  create_prompt_edit_tab()
5524
  create_prompt_clone_tab()
5525
+ # FIXME
5526
+ #create_compare_transcripts_tab()
5527
 
5528
  with gr.TabItem("Writing Tools"):
5529
  create_document_editing_tab()