oceansweep committed
Commit: cd5e862
Parent(s): 0b007b7

Upload 20 files
- App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Chat_ui.py +129 -57
- App_Function_Libraries/Gradio_UI/Export_Functionality.py +1 -1
- App_Function_Libraries/Gradio_UI/Gradio_Shared.py +3 -3
- App_Function_Libraries/Gradio_UI/Import_Functionality.py +1 -1
- App_Function_Libraries/Gradio_UI/Introduction_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Keywords.py +1 -1
- App_Function_Libraries/Gradio_UI/Media_edit.py +3 -3
- App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Podcast_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Re_summarize_tab.py +2 -2
- App_Function_Libraries/Gradio_UI/Search_Tab.py +3 -3
- App_Function_Libraries/Gradio_UI/Transcript_comparison.py +2 -2
- App_Function_Libraries/Gradio_UI/Trash.py +1 -1
- App_Function_Libraries/Gradio_UI/Utilities.py +1 -1
- App_Function_Libraries/Gradio_UI/Video_transcription_tab.py +43 -34
- App_Function_Libraries/Gradio_UI/Website_scraping_tab.py +1 -1
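
Most of the changed files only touch import paths: modules that previously sat at the package root (DB_Manager, SQLite_DB, Utils, RAG_Libary_2, ChromaDB_Library) are now imported from subpackages (DB, Utils, RAG). A minimal sketch of that pattern, assembled from the imports updated in the diffs below (module and symbol names as they appear in this commit):

# Old layout (before this commit):
#   from App_Function_Libraries.DB_Manager import load_preset_prompts
#   from App_Function_Libraries.Utils import format_transcription
# New layout (after this commit):
from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
from App_Function_Libraries.DB.SQLite_DB import DatabaseError
from App_Function_Libraries.Utils.Utils import format_transcription
from App_Function_Libraries.RAG.RAG_Libary_2 import rag_search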
App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py
CHANGED
@@ -8,7 +8,7 @@ import gradio as gr
 #
 # Local Imports
 from App_Function_Libraries.Audio_Files import process_audio_files
-from App_Function_Libraries.DB_Manager import load_preset_prompts
+from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models
 #
App_Function_Libraries/Gradio_UI/Chat_ui.py
CHANGED
@@ -15,7 +15,7 @@ import gradio as gr
 #
 # Local Imports
 from App_Function_Libraries.Chat import chat, save_chat_history, update_chat_content, save_chat_history_to_db_wrapper
-from App_Function_Libraries.DB_Manager import add_chat_message, search_chat_conversations, create_chat_conversation, \
+from App_Function_Libraries.DB.DB_Manager import add_chat_message, search_chat_conversations, create_chat_conversation, \
     get_chat_messages, update_chat_message, delete_chat_message, load_preset_prompts, db
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import update_dropdown, update_user_prompt
 
@@ -941,12 +941,6 @@ def create_chat_management_tab():
     return search_query, search_button, conversation_list, conversation_mapping, chat_content, save_button, result_message, chat_preview
 
 
-# FIXME - busted and incomplete
-# Mock function to simulate LLM processing
-def process_with_llm(workflow, context, prompt):
-    return f"LLM output for {workflow} with context: {context[:30]}... and prompt: {prompt[:30]}..."
-
-
 # Load workflows from a JSON file
 json_path = Path('./Helper_Scripts/Workflows/Workflows.json')
 with json_path.open('r') as f:
@@ -956,61 +950,139 @@ with json_path.open('r') as f:
 # FIXME - broken Completely. Doesn't work.
 def chat_workflows_tab():
     with gr.TabItem("Chat Workflows"):
-
-        gr.Markdown("# Workflows using LLMs")
+        gr.Markdown("# Workflows using LLMs")
 
+        with gr.Row():
             workflow_selector = gr.Dropdown(label="Select Workflow", choices=[wf['name'] for wf in workflows])
-        ...
+            api_selector = gr.Dropdown(
+                label="Select API Endpoint",
+                choices=["OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter",
+                         "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"],
+                value="OpenAI"
+            )
+            api_key_input = gr.Textbox(label="API Key (optional)", type="password")
 
+        context_input = gr.Textbox(label="Initial Context (optional)", lines=5)
+
+        # Create a container for dynamic components
+        with gr.Column() as dynamic_components:
+            prompt_displays = []
+            user_inputs = []
             output_boxes = []
-        ...
-            else:
-                prompt_inputs[i].visible = False
-                process_buttons[i].visible = False
-                output_boxes[i].visible = False
-
-        # Bind the workflow selector to update the UI
-        workflow_selector.change(update_prompt_sections, inputs=[workflow_selector], outputs=[])
-
-    return chat_workflows_block
+            process_buttons = []
+            regenerate_buttons = []
+
+            # Create the maximum number of components needed
+            max_steps = max(len(wf['prompts']) for wf in workflows)
+            for i in range(max_steps):
+                prompt_displays.append(gr.Markdown(visible=False))
+                user_inputs.append(gr.Textbox(label=f"Your Response", lines=2, visible=False))
+                output_boxes.append(gr.Textbox(label=f"AI Output", lines=5, visible=False))
+                with gr.Row():
+                    process_buttons.append(gr.Button(f"Process Step {i + 1}", visible=False))
+                    regenerate_buttons.append(gr.Button(f"🔄 Regenerate", visible=False))
+
+        def update_workflow_ui(workflow_name):
+            selected_workflow = next(wf for wf in workflows if wf['name'] == workflow_name)
+            num_prompts = len(selected_workflow['prompts'])
+
+            prompt_updates = []
+            input_updates = []
+            output_updates = []
+            button_updates = []
+            regenerate_updates = []
+
+            for i in range(max_steps):
+                if i < num_prompts:
+                    prompt_updates.append(
+                        gr.update(value=f"**Step {i + 1}:** {selected_workflow['prompts'][i]}", visible=True))
+                    input_updates.append(gr.update(value="", visible=True, interactive=(i == 0)))
+                    output_updates.append(gr.update(value="", visible=True))
+                    button_updates.append(gr.update(visible=(i == 0)))
+                    regenerate_updates.append(gr.update(visible=False))
+                else:
+                    prompt_updates.append(gr.update(visible=False))
+                    input_updates.append(gr.update(visible=False))
+                    output_updates.append(gr.update(visible=False))
+                    button_updates.append(gr.update(visible=False))
+                    regenerate_updates.append(gr.update(visible=False))
+
+            return prompt_updates + input_updates + output_updates + button_updates + regenerate_updates
+
+        def process(context, user_inputs, workflow_name, api_endpoint, api_key, step):
+            selected_workflow = next(wf for wf in workflows if wf['name'] == workflow_name)
+
+            # Build up the context from previous steps
+            full_context = context + "\n\n"
+            for i in range(step + 1):
+                full_context += f"Question: {selected_workflow['prompts'][i]}\n"
+                full_context += f"Answer: {user_inputs[i]}\n"
+                if i < step:
+                    full_context += f"AI Output: {output_boxes[i].value}\n\n"
+
+            result = process_with_llm(workflow_name, full_context, selected_workflow['prompts'][step], api_endpoint,
+                                      api_key)
+
+            prompt_updates = [gr.update() for _ in range(max_steps)]
+            input_updates = []
+            output_updates = [gr.update() for _ in range(max_steps)]
+            button_updates = []
+            regenerate_updates = []
+
+            for i in range(len(selected_workflow['prompts'])):
+                if i == step:
+                    regenerate_updates.append(gr.update(visible=True))
+                elif i == step + 1:
+                    input_updates.append(gr.update(interactive=True))
+                    button_updates.append(gr.update(visible=True))
+                    regenerate_updates.append(gr.update(visible=False))
+                elif i > step + 1:
+                    input_updates.append(gr.update(interactive=False))
+                    button_updates.append(gr.update(visible=False))
+                    regenerate_updates.append(gr.update(visible=False))
+                else:
+                    input_updates.append(gr.update(interactive=False))
+                    button_updates.append(gr.update(visible=False))
+                    regenerate_updates.append(gr.update(visible=True))
+
+            return [result] + prompt_updates + input_updates + output_updates + button_updates + regenerate_updates
+
+        # Set up event handlers
+        workflow_selector.change(
+            update_workflow_ui,
+            inputs=[workflow_selector],
+            outputs=prompt_displays + user_inputs + output_boxes + process_buttons + regenerate_buttons
+        )
+
+        # Set up process button click events
+        for i, button in enumerate(process_buttons):
+            button.click(
+                fn=lambda context, *user_inputs, wf_name, api_endpoint, api_key, step=i: process(context, user_inputs,
+                                                                                                 wf_name, api_endpoint,
+                                                                                                 api_key, step),
+                inputs=[context_input] + user_inputs + [workflow_selector, api_selector, api_key_input],
+                outputs=[output_boxes[
+                             i]] + prompt_displays + user_inputs + output_boxes + process_buttons + regenerate_buttons
+            )
+
+        # Set up regenerate button click events
+        for i, button in enumerate(regenerate_buttons):
+            button.click(
+                fn=lambda context, *user_inputs, wf_name, api_endpoint, api_key, step=i: process(context, user_inputs,
+                                                                                                 wf_name, api_endpoint,
+                                                                                                 api_key, step),
+                inputs=[context_input] + user_inputs + [workflow_selector, api_selector, api_key_input],
+                outputs=[output_boxes[
+                             i]] + prompt_displays + user_inputs + output_boxes + process_buttons + regenerate_buttons
+            )
+
+        return workflow_selector, api_selector, api_key_input, context_input, dynamic_components
+
+
+# Mock function to simulate LLM processing
+def process_with_llm(workflow, context, prompt, api_endpoint, api_key):
+    api_key_snippet = api_key[:5] + "..." if api_key else "Not provided"
+    return f"LLM output using {api_endpoint} (API Key: {api_key_snippet}) for {workflow} with context: {context[:30]}... and prompt: {prompt[:30]}..."
 
 #
 # End of Chat_ui.py
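
The rebuilt chat_workflows_tab() only relies on each entry in Workflows.json exposing a 'name' and a list of 'prompts' (see update_workflow_ui() and process() above). A hypothetical minimal file illustrating that shape; the workflow name and prompt text here are invented for illustration:

import json
from pathlib import Path

# Hypothetical example data; only the 'name' and 'prompts' keys are assumed by the tab.
example_workflows = [
    {
        "name": "Summarize then critique",
        "prompts": [
            "Summarize the provided context in five bullet points.",
            "List any claims in the summary that the context does not support.",
        ],
    },
]

# Same path the tab reads from at import time.
Path("./Helper_Scripts/Workflows/Workflows.json").write_text(
    json.dumps(example_workflows, indent=2)
)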
App_Function_Libraries/Gradio_UI/Export_Functionality.py
CHANGED
@@ -8,7 +8,7 @@ import shutil
 import tempfile
 from typing import List, Dict, Optional, Tuple
 import gradio as gr
-from App_Function_Libraries.DB_Manager import DatabaseError, create_automated_backup, db_path, backup_dir
+from App_Function_Libraries.DB.DB_Manager import DatabaseError, create_automated_backup, db_path, backup_dir
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import fetch_item_details, fetch_items_by_keyword, browse_items
 
 logger = logging.getLogger(__name__)
App_Function_Libraries/Gradio_UI/Gradio_Shared.py
CHANGED
@@ -12,9 +12,9 @@ from typing import List, Tuple
 import gradio as gr
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import list_prompts, db, search_and_display, fetch_prompt_details
-from App_Function_Libraries.SQLite_DB import DatabaseError
-from App_Function_Libraries.Utils import format_transcription
+from App_Function_Libraries.DB.DB_Manager import list_prompts, db, search_and_display, fetch_prompt_details
+from App_Function_Libraries.DB.SQLite_DB import DatabaseError
+from App_Function_Libraries.Utils.Utils import format_transcription
 #
 ##############################################################################################################
 #
App_Function_Libraries/Gradio_UI/Import_Functionality.py
CHANGED
@@ -16,7 +16,7 @@ import gradio as gr
 import pypandoc
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import insert_prompt_to_db, load_preset_prompts, import_obsidian_note_to_db, \
+from App_Function_Libraries.DB.DB_Manager import insert_prompt_to_db, load_preset_prompts, import_obsidian_note_to_db, \
     add_media_to_database
 from App_Function_Libraries.Prompt_Handling import import_prompt_from_file, import_prompts_from_zip#
 from App_Function_Libraries.Summarization_General_Lib import perform_summarization
App_Function_Libraries/Gradio_UI/Introduction_tab.py
CHANGED
@@ -6,7 +6,7 @@
 # External Imports
 import gradio as gr
 
-from App_Function_Libraries.DB_Manager import get_db_config
+from App_Function_Libraries.DB.DB_Manager import get_db_config
 
 
 #
App_Function_Libraries/Gradio_UI/Keywords.py
CHANGED
@@ -10,7 +10,7 @@
 import gradio as gr
 #
 # Internal Imports
-from App_Function_Libraries.DB_Manager import add_keyword, delete_keyword, keywords_browser_interface, export_keywords_to_csv
+from App_Function_Libraries.DB.DB_Manager import add_keyword, delete_keyword, keywords_browser_interface, export_keywords_to_csv
 #
 #
 ######################################################################################################################
App_Function_Libraries/Gradio_UI/Media_edit.py
CHANGED
@@ -9,10 +9,10 @@ import uuid
 import gradio as gr
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import add_prompt, update_media_content, db, add_or_update_prompt, \
+from App_Function_Libraries.DB.DB_Manager import add_prompt, update_media_content, db, add_or_update_prompt, \
     load_prompt_details
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import update_dropdown, update_prompt_dropdown
-from App_Function_Libraries.SQLite_DB import fetch_item_details
+from App_Function_Libraries.DB.SQLite_DB import fetch_item_details
 
 
 def create_media_edit_tab():
@@ -173,7 +173,7 @@ def create_media_edit_and_clone_tab():
 
 
 def create_prompt_edit_tab():
-    with gr.TabItem("Edit Prompts"):
+    with gr.TabItem("Add & Edit Prompts"):
         with gr.Row():
             with gr.Column():
                 prompt_dropdown = gr.Dropdown(
App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py
CHANGED
@@ -10,7 +10,7 @@ import tempfile
 import gradio as gr
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import load_preset_prompts
+from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
 from App_Function_Libraries.PDF_Ingestion_Lib import extract_metadata_from_pdf, extract_text_and_format_from_pdf, \
     process_and_cleanup_pdf
App_Function_Libraries/Gradio_UI/Podcast_tab.py
CHANGED
@@ -9,7 +9,7 @@ import gradio as gr
 #
 # Local Imports
 from App_Function_Libraries.Audio_Files import process_podcast
-from App_Function_Libraries.DB_Manager import load_preset_prompts
+from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models, update_user_prompt
 
 
App_Function_Libraries/Gradio_UI/Re_summarize_tab.py
CHANGED
@@ -10,12 +10,12 @@ import gradio as gr
 #
 # Local Imports
 from App_Function_Libraries.Chunk_Lib import improved_chunking_process
-from App_Function_Libraries.DB_Manager import update_media_content, load_preset_prompts
+from App_Function_Libraries.DB.DB_Manager import update_media_content, load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import fetch_item_details, fetch_items_by_keyword, \
     fetch_items_by_content, fetch_items_by_title_or_url
 from App_Function_Libraries.Summarization_General_Lib import summarize_chunk
-from App_Function_Libraries.Utils import load_comprehensive_config
+from App_Function_Libraries.Utils.Utils import load_comprehensive_config
 #
 #
 ######################################################################################################################
App_Function_Libraries/Gradio_UI/Search_Tab.py
CHANGED
@@ -10,9 +10,9 @@ import sqlite3
 # External Imports
 import gradio as gr
 
-from App_Function_Libraries.DB_Manager import view_database, search_and_display_items
+from App_Function_Libraries.DB.DB_Manager import view_database, search_and_display_items
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import update_dropdown, update_detailed_view
-from App_Function_Libraries.RAG_Libary_2 import rag_search
+from App_Function_Libraries.RAG.RAG_Libary_2 import rag_search
 
 #
 # Local Imports
@@ -96,7 +96,7 @@ def create_embeddings_tab():
     def create_embeddings(api_choice):
         try:
             # Assuming you have a function that handles the creation of embeddings
-            from App_Function_Libraries.ChromaDB_Library import create_all_embeddings
+            from App_Function_Libraries.RAG.ChromaDB_Library import create_all_embeddings
             status = create_all_embeddings(api_choice)
             return status
         except Exception as e:
App_Function_Libraries/Gradio_UI/Transcript_comparison.py
CHANGED
@@ -8,9 +8,9 @@ import logging
 # External Imports
 import gradio as gr
 
-from App_Function_Libraries.DB_Manager import get_transcripts
+from App_Function_Libraries.DB.DB_Manager import get_transcripts
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import browse_items
-from App_Function_Libraries.Utils import format_transcription
+from App_Function_Libraries.Utils.Utils import format_transcription
 
 
 #
App_Function_Libraries/Gradio_UI/Trash.py
CHANGED
@@ -9,7 +9,7 @@ import sqlite3
 import gradio as gr
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import delete_prompt, empty_trash, get_trashed_items, user_delete_item
+from App_Function_Libraries.DB.DB_Manager import delete_prompt, empty_trash, get_trashed_items, user_delete_item
 
 
 def delete_item(media_id, force):
App_Function_Libraries/Gradio_UI/Utilities.py
CHANGED
@@ -6,7 +6,7 @@ from pathlib import Path
 import gradio as gr
 import yt_dlp
 
-from App_Function_Libraries.Utils import sanitize_filename, downloaded_files
+from App_Function_Libraries.Utils.Utils import sanitize_filename, downloaded_files
 
 
 def create_utilities_yt_video_tab():
App_Function_Libraries/Gradio_UI/Video_transcription_tab.py
CHANGED
@@ -9,14 +9,16 @@ import os
 # External Imports
 import gradio as gr
 import yt_dlp
+
+from App_Function_Libraries.Confabulation_check import simplified_geval
 #
 # Local Imports
-from App_Function_Libraries.DB_Manager import load_preset_prompts, add_media_to_database
+from App_Function_Libraries.DB.DB_Manager import load_preset_prompts, add_media_to_database
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models, update_user_prompt
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import error_handler
 from App_Function_Libraries.Summarization_General_Lib import perform_transcription, perform_summarization, \
     save_transcription_and_summary
-from App_Function_Libraries.Utils import convert_to_seconds, safe_read_file, format_transcription, \
+from App_Function_Libraries.Utils.Utils import convert_to_seconds, safe_read_file, format_transcription, \
     create_download_directory, generate_unique_identifier, extract_text_from_segments
 from App_Function_Libraries.Video_DL_Ingestion_Lib import parse_and_expand_urls, extract_metadata, download_video
 #
@@ -122,7 +124,7 @@ def create_video_transcription_tab():
                 summarize_recursively = gr.Checkbox(label="Enable Recursive Summarization", value=False)
                 use_cookies_input = gr.Checkbox(label="Use cookies for authenticated download", value=False)
                 use_time_input = gr.Checkbox(label="Use Start and End Time", value=False)
-
+                confab_checkbox = gr.Checkbox(label="Perform Confabulation Check of Summary", value=False)
                 with gr.Row(visible=False) as time_input_box:
                     gr.Markdown("### Start and End time")
                     with gr.Column():
@@ -176,6 +178,7 @@ def create_video_transcription_tab():
                 progress_output = gr.Textbox(label="Progress")
                 error_output = gr.Textbox(label="Errors", visible=False)
                 results_output = gr.HTML(label="Results")
+                confabulation_output = gr.Textbox(label="Confabulation Check Results", visible=False)
                 download_transcription = gr.File(label="Download All Transcriptions as JSON")
                 download_summary = gr.File(label="Download All Summaries as Text")
 
@@ -422,7 +425,7 @@ def create_video_transcription_tab():
                                        chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
                                        use_multi_level_chunking, chunk_language, summarize_recursively, api_name,
                                        api_key, keywords, use_cookies, cookies, batch_size,
-                                       timestamp_option, keep_original_video):
+                                       timestamp_option, keep_original_video, confab_checkbox):
             global result
             try:
                 logging.info("process_videos_wrapper(): process_videos_wrapper called")
@@ -450,42 +453,41 @@ def create_video_transcription_tab():
 
                 if not inputs:
                     raise ValueError("No input provided. Please enter URLs or upload a video file.")
-                ...
+
+                result = process_videos_with_error_handling(
+                    inputs, start_time, end_time, diarize, whisper_model,
+                    custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
+                    chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
+                    use_multi_level_chunking, chunk_language, api_name,
+                    api_key, keywords, use_cookies, cookies, batch_size,
+                    timestamp_option, keep_original_video, summarize_recursively
+                )
+
+                confabulation_result = None
+                if confab_checkbox:
+                    logging.info("Confabulation check enabled")
+                    # Assuming result[1] contains the transcript and result[2] contains the summary
+                    confabulation_result = simplified_geval(result[1], result[2], api_name, api_key)
+                    logging.info(f"Simplified G-Eval result: {confabulation_result}")
 
                 # Ensure that result is a tuple with 5 elements
                 if not isinstance(result, tuple) or len(result) != 5:
                     raise ValueError(
                         f"process_videos_wrapper(): Expected 5 outputs, but got {len(result) if isinstance(result, tuple) else 1}")
 
-                ...
+                # Return the confabulation result along with other outputs
+                return (*result, confabulation_result)
+
             except Exception as e:
                 logging.error(f"process_videos_wrapper(): Error in process_videos_wrapper: {str(e)}", exc_info=True)
-                # Return a tuple with
+                # Return a tuple with 6 elements in case of any error (including None for simple_geval_result)
                 return (
-                    # progress_output
-                    ...
-                    #
-                    ...
-                    #
-                    ...
-                    # download_transcription
-                    None,
-                    # download_summary
-                    None
+                    f"process_videos_wrapper(): An error occurred: {str(e)}",  # progress_output
+                    str(e),  # error_output
+                    f"<div class='error'>Error: {str(e)}</div>",  # results_output
+                    None,  # download_transcription
+                    None,  # download_summary
+                    None  # simple_geval_result
                 )
 
         # FIXME - remove dead args for process_url_with_metadata
@@ -590,7 +592,6 @@ def create_video_transcription_tab():
             with open(segments_json_path, 'w') as f:
                 json.dump(segments_with_metadata, f, indent=2)
 
-            # FIXME - why isnt this working?
             # Delete the .wav file after successful transcription
             files_to_delete = [audio_file_path]
             for file_path in files_to_delete:
@@ -677,6 +678,14 @@ def create_video_transcription_tab():
                 logging.error(f"Error in process_url_with_metadata: {str(e)}", exc_info=True)
                 return None, None, None, None, None, None
 
+        def toggle_confabulation_output(checkbox_value):
+            return gr.update(visible=checkbox_value)
+
+        confab_checkbox.change(
+            fn=toggle_confabulation_output,
+            inputs=[confab_checkbox],
+            outputs=[confabulation_output]
+        )
         process_button.click(
             fn=process_videos_wrapper,
             inputs=[
@@ -685,7 +694,7 @@ def create_video_transcription_tab():
                 chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
                 use_multi_level_chunking, chunk_language, summarize_recursively, api_name_input, api_key_input,
                 keywords_input, use_cookies_input, cookies_input, batch_size_input,
-                timestamp_option, keep_original_video
+                timestamp_option, keep_original_video, confab_checkbox
             ],
-            outputs=[progress_output, error_output, results_output, download_transcription, download_summary]
+            outputs=[progress_output, error_output, results_output, download_transcription, download_summary, confabulation_output]
         )
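
The confabulation check added above is opt-in: the checkbox toggles the hidden results box and the wrapper only calls simplified_geval when it is set. A minimal standalone sketch of that gating, assuming simplified_geval keeps the (transcript, summary, api_name, api_key) signature used in the diff; the helper name here is illustrative:

import gradio as gr

from App_Function_Libraries.Confabulation_check import simplified_geval

def maybe_run_confab_check(transcript, summary, run_check, api_name, api_key):
    # Skip the extra LLM call (and keep the results box hidden) unless the user opted in.
    if not run_check:
        return gr.update(visible=False, value="")
    verdict = simplified_geval(transcript, summary, api_name, api_key)
    return gr.update(visible=True, value=verdict)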
App_Function_Libraries/Gradio_UI/Website_scraping_tab.py
CHANGED
@@ -7,7 +7,7 @@
 import gradio as gr
 
 from App_Function_Libraries.Article_Summarization_Lib import scrape_and_summarize_multiple
-from App_Function_Libraries.DB_Manager import load_preset_prompts
+from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
 
 