Update app.py
app.py
CHANGED
@@ -79,6 +79,7 @@ MODEL_CONTEXT_SIZES = {
         "openGPT-X/Teuken-7B-instruct-research-v0.4": 4096,
         "Qwen/Qwen2.5-7B-Instruct": 131072,
         "tiiuae/falcon-7b-instruct": 8192,
+        "Qwen/QwQ-32B-preview": 32768,  # Add QwQ model
     },
     "Groq API": {
         "gemma2-9b-it": 8192,
@@ -157,10 +158,17 @@ class ModelRegistry:
             "Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",  # works well
             "Zephyr 7B": "HuggingFaceH4/zephyr-7b-beta",  # works
             "Phi-3.5 Mini": "microsoft/Phi-3.5-mini-instruct",  # works but poor results
+            "Phi-3 Mini 4K": "microsoft/phi-3-mini-4k-instruct",  # good for small context
+            "Phi-3 Mini 128K": "microsoft/Phi-3-mini-128k-instruct",  # good for large context
             "Gemma 2 2B": "google/gemma-2-2b-it",  # works but often busy
             "GPT2": "openai-community/gpt2",  # works with token limits
             "Phi-2": "microsoft/phi-2",  # works with token limits
             "TinyLlama 1.1B": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",  # works with token limits
+            "DeepSeek Coder V2": "deepseek-ai/DeepSeek-Coder-V2-Instruct",  # good for code
+            "Falcon-7B": "tiiuae/falcon-7b-instruct",  # reliable
+            "Qwen 2.5 7B": "Qwen/Qwen2.5-7B-Instruct",  # good performance
+            "QwQ 32B Preview": "Qwen/QwQ-32B-preview",  # special handling
+            # Models requiring API key
             "DeepSeek Coder V2 (Pro)": "deepseek-ai/DeepSeek-Coder-V2-Instruct",  # needs API key
             "Meta Llama 3.1 70B (Pro)": "meta-llama/Meta-Llama-3.1-70B-Instruct",  # needs API key
             "Aya 23-35B (Pro)": "CohereForAI/aya-23-35B",  # needs API key
@@ -540,7 +548,7 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
         # 1. HTML component for clipboard action
         # 2. A success message for summary output
         # 3. The download file
-        return gr.HTML(html_template), "Text
+        return gr.HTML(html_template), "Use Copy Text to Clipboard button below, then paste where you like.", download_file


     # Get the summary based on model selection
@@ -603,8 +611,37 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
         logging.error(f"Error in send_to_model_impl: {error_msg}")
         return f"Error: {error_msg}", None

+def send_to_qwq(prompt: str):
+    """Send prompt to QwQ API."""
+    try:
+        from gradio_client import Client
+        client = Client("Qwen/QwQ-32B-preview")
+
+        # Call the add_text endpoint
+        result = client.predict(
+            _input={"files":[], "text": prompt},
+            _chatbot=[],
+            api_name="/add_text"
+        )
+
+        # Call the agent_run endpoint
+        response = client.predict(
+            _chatbot=result[1],
+            api_name="/agent_run"
+        )
+
+        return response[0].get('text', 'No response from QwQ')
+
+    except Exception as e:
+        logging.error(f"QwQ API error: {e}")
+        return f"Error with QwQ API: {str(e)}"
+
 def send_to_hf_inference(prompt: str, model_name: str, api_key: str = None, use_rate_limit: bool = False) -> str:
     """Send prompt to HuggingFace Inference API."""
+    # Special handling for QwQ
+    if model_name == "Qwen/QwQ-32B-preview":
+        return send_to_qwq(prompt)
+
     def _send():
         # Check token limits first
         is_within_limits, error_msg = check_token_limits(prompt, model_name)
@@ -767,6 +804,110 @@ def check_token_limits(prompt: str, model_name: str) -> tuple[bool, str]:
         return False, f"Prompt too long (estimated {estimated_tokens} tokens). This model supports max {max_tokens} tokens."
     return True, ""

+def copy_to_clipboard(text):
+    return gr.HTML(f"""
+        <script>
+            navigator.clipboard.writeText(`{text}`).then(
+                function() {{
+                    const btn = document.querySelector('button:contains("Copy to Clipboard")');
+                    btn.textContent = '✅ Copied!';
+                    setTimeout(() => btn.textContent = '📋 Copy to Clipboard', 2000);
+                }},
+                function(err) {{
+                    console.error('Failed to copy:', err);
+                    const btn = document.querySelector('button:contains("Copy to Clipboard")');
+                    btn.textContent = '❌ Failed to copy';
+                    setTimeout(() => btn.textContent = '📋 Copy to Clipboard', 2000);
+                }}
+            );
+        </script>
+    """)
+
+def handle_model_selection(choice):
+    """Handle model selection and update UI"""
+    ctx_size = MODEL_CONTEXT_SIZES.get(choice, {})
+    if isinstance(ctx_size, dict):
+        first_model = list(ctx_size.keys())[0]
+        ctx_size = ctx_size[first_model]
+
+    if choice == "OpenAI ChatGPT":
+        model_choices = list(MODEL_CONTEXT_SIZES["OpenAI ChatGPT"].keys())
+        return [
+            gr.update(visible=False),  # hf_options
+            gr.update(visible=False),  # groq_options
+            gr.update(visible=True),   # openai_options
+            gr.update(visible=False),  # cohere_options
+            gr.update(visible=False),  # glhf_options
+            gr.update(value=ctx_size), # context_size
+            gr.update(interactive=True),  # send_model_btn
+            gr.Dropdown(choices=model_choices, value=first_model),  # openai_model
+            gr.update(visible=False)   # hf_custom_model visibility
+        ]
+    elif choice == "HuggingFace Inference":
+        model_choices = list(MODEL_CONTEXT_SIZES["HuggingFace Inference"].keys())
+        return [
+            gr.update(visible=True),   # hf_options
+            gr.update(visible=False),  # groq_options
+            gr.update(visible=False),  # openai_options
+            gr.update(visible=False),  # cohere_options
+            gr.update(visible=False),  # glhf_options
+            gr.update(value=ctx_size), # context_size
+            gr.update(interactive=True),  # send_model_btn
+            gr.Dropdown(choices=model_choices, value="mistralai/Mistral-7B-Instruct-v0.3"),
+            gr.update(visible=False)   # hf_custom_model initially hidden
+        ]
+    elif choice == "Groq API":
+        model_choices = list(model_registry.groq_models.keys())
+        return [
+            gr.update(visible=False),  # hf_options
+            gr.update(visible=True),   # groq_options
+            gr.update(visible=False),  # openai_options
+            gr.update(visible=False),  # cohere_options
+            gr.update(visible=False),  # glhf_options
+            gr.update(value=ctx_size), # context_size
+            gr.update(interactive=True),  # send_model_btn
+            gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None),
+            gr.update(visible=False)   # hf_custom_model visibility
+        ]
+    elif choice == "Cohere API":
+        return [
+            gr.update(visible=False),  # hf_options
+            gr.update(visible=False),  # groq_options
+            gr.update(visible=False),  # openai_options
+            gr.update(visible=True),   # cohere_options
+            gr.update(visible=False),  # glhf_options
+            gr.update(value=ctx_size), # context_size
+            gr.update(interactive=True),  # send_model_btn
+            gr.Dropdown(choices=[]),   # not used
+            gr.update(visible=False)   # hf_custom_model visibility
+        ]
+    elif choice == "GLHF API":
+        model_choices = list(MODEL_CONTEXT_SIZES["GLHF API"].keys())
+        return [
+            gr.update(visible=False),  # hf_options
+            gr.update(visible=False),  # groq_options
+            gr.update(visible=False),  # openai_options
+            gr.update(visible=False),  # cohere_options
+            gr.update(visible=True),   # glhf_options
+            gr.update(value=ctx_size), # context_size
+            gr.update(interactive=True),  # send_model_btn
+            gr.Dropdown(choices=[]),   # not used
+            gr.update(visible=False)   # hf_custom_model visibility
+        ]
+
+    # Default return for "Clipboard only" or other options
+    return [
+        gr.update(visible=False),  # hf_options
+        gr.update(visible=False),  # groq_options
+        gr.update(visible=False),  # openai_options
+        gr.update(visible=False),  # cohere_options
+        gr.update(visible=False),  # glhf_options
+        gr.update(value=4096),     # context_size
+        gr.update(interactive=False),  # send_model_btn
+        gr.Dropdown(choices=[]),   # not used
+        gr.update(visible=False)   # hf_custom_model visibility
+    ]
+
 def copy_text_js(element_id: str) -> str:
     return f"""function() {{
         let textarea = document.getElementById('{element_id}');
@@ -780,10 +921,6 @@ def copy_text_js(element_id: str) -> str:
     }}
     }}"""

-def open_chatgpt() -> str:
-    """Open ChatGPT in new browser tab"""
-    return """window.open('https://chat.openai.com/', '_blank');"""
-
 def process_pdf(pdf, fmt, ctx_size):
     """Process PDF and return text and snippets"""
     try:
@@ -953,186 +1090,240 @@ with gr.Blocks(css="""

     # Tab 3: Model Processing
     with gr.Tab("3️⃣ Model Processing"):
-        )
-
-        with gr.Column(visible=False) as openai_options:
-            openai_model = gr.Dropdown(
-                choices=list(MODEL_CONTEXT_SIZES["OpenAI ChatGPT"].keys()),
-                value="gpt-3.5-turbo",
-                label="OpenAI Model"
-            )
-            openai_api_key = gr.Textbox(
-                label="🔑 OpenAI API Key",
-                type="password"
-            )
-
-        with gr.Column(visible=False) as hf_options:
-            hf_model = gr.Dropdown(
-                choices=list(model_registry.hf_models.keys()),
-                label="🧠 HuggingFace Model",
-                value="Mistral 7B",
-                allow_custom_value=True
-            )
-            hf_custom_model = gr.Textbox(  # This needs to be defined before being used
-                label="Custom Model ID",
-                placeholder="Enter custom model ID...",
-                visible=False
-            )
-            hf_api_key = gr.Textbox(
-                label="🔑 HuggingFace API Key",
-                type="password"
-            )
-
-        with gr.Column(visible=False) as groq_options:
-            groq_model = gr.Dropdown(
-                choices=list(model_registry.groq_models.keys()),  # Use model_registry.groq_models
-                value=list(model_registry.groq_models.keys())[0] if model_registry.groq_models else None,  # Set a default value if available
-                label="Groq Model"
-            )
-            groq_api_key = gr.Textbox(
-                label="🔑 Groq API Key",
-                type="password"
-            )
-            groq_refresh_btn = gr.Button("🔄 Refresh Groq Models")  # Add refresh button
-
-        with gr.Column(visible=False) as glhf_options:
-            glhf_api_key = gr.Textbox(
-                label="🔑 GLHF API Key",
-                type="password"
-            )
-            glhf_model = gr.Dropdown(
-                choices=list(MODEL_CONTEXT_SIZES["GLHF API"].keys()) + ["Custom Model"],
-                value="mistralai/Mistral-7B-Instruct-v0.3",
-                label="Model Selection"
-            )
-            glhf_custom_model = gr.Textbox(
-                label="Custom Model ID (owner/model format)",
-                placeholder="e.g., mistralai/Mixtral-8x7B-Instruct-v0.2",
-                visible=False
-            )
-
-        with gr.Column(visible=False) as cohere_options:
-            cohere_api_key = gr.Textbox(
-                label="🔑 Cohere API Key (optional - needed for some models)",
-                type="password"
-            )
-            cohere_model = gr.Dropdown(
-                choices=[
-                    "command-r-plus-08-2024",
-                    "command-r-plus-04-2024",
-                    "command-r",
-                    "command",
-                    "command-light"
-                ],
-                value="command-r-plus-08-2024",
-                label="Cohere Model"
-            )
-
-                            setTimeout(() => {
-                                this.textContent = '📋 Copy
-                            }, 2000);
-                            }
-                            document.execCommand('copy');
-                            this.textContent = '✅ Copied! Opening ChatGPT...';
-                            setTimeout(() => {
-                                window.open('https://chat.openai.com/', '_blank');
-                                setTimeout(() => {
-                                    this.textContent = '📋 Copy
-                                }, 2000);
-                            }
-                            setTimeout(() => {
-                                this.textContent = '📋 Copy
-                            }, 2000);
-                            }
+        with gr.Row():
+            with gr.Column(scale=1):
+                model_choice = gr.Radio(
+                    choices=list(MODEL_CONTEXT_SIZES.keys()),
+                    value="Clipboard only",
+                    label="🤖 Provider Selection"
+                )
+
+                # Model-specific option containers
+                with gr.Column(visible=False) as openai_options:
+                    openai_model = gr.Dropdown(
+                        choices=list(MODEL_CONTEXT_SIZES["OpenAI ChatGPT"].keys()),
+                        value="gpt-3.5-turbo",
+                        label="OpenAI Model"
+                    )
+                    openai_api_key = gr.Textbox(
+                        label="🔑 OpenAI API Key",
+                        type="password"
+                    )
+
+                with gr.Column(visible=False) as hf_options:
+                    hf_model = gr.Dropdown(
+                        choices=list(MODEL_CONTEXT_SIZES["HuggingFace Inference"].keys()),
+                        label="🧠 HuggingFace Model",
+                        value="mistralai/Mistral-7B-Instruct-v0.3",
+                        allow_custom_value=True
+                    )
+                    hf_custom_model = gr.Textbox(
+                        label="Custom Model ID",
+                        placeholder="Enter custom model ID...",
+                        visible=False
+                    )
+                    hf_api_key = gr.Textbox(
+                        label="🔑 HuggingFace API Key",
+                        type="password"
+                    )
+
+                with gr.Column(visible=False) as groq_options:
+                    groq_model = gr.Dropdown(
+                        choices=list(model_registry.groq_models.keys()),
+                        value=list(model_registry.groq_models.keys())[0] if model_registry.groq_models else None,
+                        label="Groq Model"
+                    )
+                    groq_api_key = gr.Textbox(
+                        label="🔑 Groq API Key",
+                        type="password"
+                    )
+                    groq_refresh_btn = gr.Button("🔄 Refresh Groq Models")
+
+                with gr.Column(visible=False) as glhf_options:
+                    glhf_api_key = gr.Textbox(
+                        label="🔑 GLHF API Key",
+                        type="password"
+                    )
+                    glhf_model = gr.Dropdown(
+                        choices=list(MODEL_CONTEXT_SIZES["GLHF API"].keys()),
+                        value="mistralai/Mistral-7B-Instruct-v0.3",
+                        label="Model Selection"
+                    )
+
+                with gr.Column(visible=False) as cohere_options:
+                    cohere_api_key = gr.Textbox(
+                        label="🔑 Cohere API Key",
+                        type="password"
+                    )
+                    cohere_model = gr.Dropdown(
+                        choices=list(MODEL_CONTEXT_SIZES["Cohere API"].keys()),
+                        value="command-r-plus-08-2024",
+                        label="Cohere Model"
+                    )
+
+                # Action Buttons Row
+                with gr.Row():
+
+                    # Copy to Clipboard button with robust fallbacks
+                    copy_button = gr.HTML("""
+                        <div style="text-align: center; margin: 10px;">
+                            <button
+                                onclick="
+                                    try {
+                                        const promptArea =
+                                            document.querySelector('#generated_prompt textarea') ||
+                                            document.querySelector('textarea#generated_prompt') ||
+                                            document.querySelector('.generated_prompt textarea') ||
+                                            Array.from(document.querySelectorAll('textarea')).find(el => el.value.includes('Summarize'));
+
+                                        if (promptArea && promptArea.value) {
+                                            navigator.clipboard.writeText(promptArea.value)
+                                                .then(() => {
+                                                    this.textContent = '✅ Copied!';
+                                                    setTimeout(() => {
+                                                        this.textContent = '📋 Copy to Clipboard';
+                                                    }, 2000);
+                                                })
+                                                .catch(err => {
+                                                    console.error('Modern copy failed:', err);
+                                                    promptArea.select();
+                                                    document.execCommand('copy');
+                                                    this.textContent = '✅ Copied using fallback!';
+                                                    setTimeout(() => {
+                                                        this.textContent = '📋 Copy to Clipboard';
+                                                    }, 2000);
+                                                });
+                                        } else {
+                                            this.textContent = '❌ No text found';
+                                            setTimeout(() => {
+                                                this.textContent = '📋 Copy to Clipboard';
+                                            }, 2000);
+                                        }
+                                    } catch (err) {
+                                        console.error('Copy error:', err);
+                                        this.textContent = '❌ Copy failed';
+                                        setTimeout(() => {
+                                            this.textContent = '📋 Copy to Clipboard';
+                                        }, 2000);
+                                    }
+                                "
+                                style="
+                                    padding: 10px 20px;
+                                    background-color: #2C3E50;
+                                    color: white;
+                                    border: none;
+                                    border-radius: 5px;
+                                    font-weight: bold;
+                                    cursor: pointer;
+                                    transition: background-color 0.3s ease;
+                                "
+                                onmouseover="this.style.backgroundColor='#34495E'"
+                                onmouseout="this.style.backgroundColor='#2C3E50'"
+                            >
+                                📋 Copy to Clipboard
+                            </button>
+                        </div>
+                    """)
+
+                    send_to_model_btn = gr.Button("🚀 Send to Model", variant="primary", interactive=False)
+
+                    # Restore the robust ChatGPT button implementation
+                    chatgpt_button = gr.HTML("""
+                        <div style="text-align: center; margin: 10px;">
+                            <button
+                                onclick="
+                                    try {
+                                        const promptArea =
+                                            document.querySelector('#generated_prompt textarea') ||
+                                            document.querySelector('textarea#generated_prompt') ||
+                                            document.querySelector('.generated_prompt textarea') ||
+                                            Array.from(document.querySelectorAll('textarea')).find(el => el.value.includes('Summarize'));
+
+                                        if (promptArea && promptArea.value) {
+                                            navigator.clipboard.writeText(promptArea.value)
+                                                .then(() => {
+                                                    this.textContent = '✅ Copied! Opening ChatGPT...';
+                                                    setTimeout(() => {
+                                                        window.open('https://chat.openai.com/', '_blank');
+                                                        setTimeout(() => {
+                                                            this.textContent = '📋 Copy & Open ChatGPT';
+                                                        }, 2000);
+                                                    }, 500);
+                                                })
+                                                .catch(err => {
+                                                    console.error('Modern copy failed:', err);
+                                                    promptArea.select();
+                                                    document.execCommand('copy');
+                                                    this.textContent = '✅ Copied! Opening ChatGPT...';
+                                                    setTimeout(() => {
+                                                        window.open('https://chat.openai.com/', '_blank');
+                                                        setTimeout(() => {
+                                                            this.textContent = '📋 Copy & Open ChatGPT';
+                                                        }, 2000);
+                                                    }, 500);
+                                                });
+                                        } else {
+                                            this.textContent = '❌ No prompt found. Generate one first.';
+                                            setTimeout(() => {
+                                                this.textContent = '📋 Copy & Open ChatGPT';
+                                            }, 2000);
+                                        }
+                                    } catch (err) {
+                                        console.error('Copy error:', err);
+                                        this.textContent = '❌ Copy failed. Try again.';
+                                        setTimeout(() => {
+                                            this.textContent = '📋 Copy & Open ChatGPT';
+                                        }, 2000);
+                                    }
+                                "
+                                style="
+                                    padding: 10px 20px;
+                                    background-color: #2C3E50;
+                                    color: white;
+                                    border: none;
+                                    border-radius: 5px;
+                                    font-weight: bold;
+                                    cursor: pointer;
+                                    transition: background-color 0.3s ease;
+                                "
+                                onmouseover="this.style.backgroundColor='#34495E'"
+                                onmouseout="this.style.backgroundColor='#2C3E50'"
+                            >
+                                📋 Copy & Open ChatGPT
+                            </button>
+                        </div>
+                    """)
+
+                # JavaScript for model choice handling
+                gr.HTML("""
+                    <script>
+                        // Enable/disable send button based on selection
+                        document.querySelector('input[name="model_choice"]').addEventListener('change', function(e) {
+                            const sendButton = document.querySelector('button:contains("Send to Model")');
+                            if (sendButton) {
+                                sendButton.disabled = (e.target.value === 'Clipboard only');
+                            }
+                        });
+                    </script>
+                """)
+
+            # Summary section
+            with gr.Column(scale=1):
+                summary_output = gr.Textbox(
+                    label="📝 Summary",
+                    lines=15,
+                    max_lines=50,
+                    show_copy_button=True,
+                    elem_id="summary_output"
+                )
+
+                # Summary actions row
+                with gr.Row():
+                    copy_summary_btn = gr.Button("📋 Copy Summary", size="sm")
+                    download_summary = gr.File(label="📥 Download Summary")
+
+                # Status display
+                clipboard_status = gr.HTML(elem_id="clipboard_status")

     # Hidden components for file handling
     download_files = gr.Files(label="📥 Downloads", visible=False)
@@ -1193,80 +1384,6 @@ with gr.Blocks(css="""
     def handle_groq_model_change(model_name):
         """Handle Groq model selection change"""
         return update_context_size("Groq API", model_name)
-
-    def handle_model_selection(choice):
-        """Handle model selection and update UI"""
-        ctx_size = MODEL_CONTEXT_SIZES.get(choice, {})
-        if isinstance(ctx_size, dict):
-            first_model = list(ctx_size.keys())[0]
-            ctx_size = ctx_size[first_model]
-
-        if choice == "OpenAI ChatGPT":
-            model_choices = list(MODEL_CONTEXT_SIZES["OpenAI ChatGPT"].keys())
-            return [
-                gr.update(visible=False),  # hf_options
-                gr.update(visible=False),  # groq_options
-                gr.update(visible=True),   # openai_options
-                gr.update(visible=False),  # cohere_options
-                gr.update(visible=False),  # glhf_options
-                gr.update(value=ctx_size), # context_size
-                gr.Dropdown(choices=model_choices, value=first_model)  # openai_model
-            ]
-        elif choice == "HuggingFace Inference":
-            model_choices = list(model_registry.hf_models.keys())
-            return [
-                gr.update(visible=True),   # hf_options
-                gr.update(visible=False),  # groq_options
-                gr.update(visible=False),  # openai_options
-                gr.update(visible=False),  # cohere_options
-                gr.update(visible=False),  # glhf_options
-                gr.update(value=ctx_size), # context_size
-                gr.Dropdown(choices=model_choices, value="Mistral 7B")  # Update default value
-            ]
-        elif choice == "Groq API":
-            model_choices = list(model_registry.groq_models.keys())
-            return [
-                gr.update(visible=False),  # hf_options
-                gr.update(visible=True),   # groq_options
-                gr.update(visible=False),  # openai_options
-                gr.update(visible=False),  # cohere_options
-                gr.update(visible=False),  # glhf_options
-                gr.update(value=ctx_size), # context_size
-                gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)
-            ]
-        elif choice == "Cohere API":
-            return [
-                gr.update(visible=False),  # hf_options
-                gr.update(visible=False),  # groq_options
-                gr.update(visible=False),  # openai_options
-                gr.update(visible=True),   # cohere_options
-                gr.update(visible=False),  # glhf_options
-                gr.update(value=ctx_size), # context_size
-                gr.Dropdown(choices=[])    # not used
-            ]
-        elif choice == "GLHF API":
-            # Always show HuggingFace models for GLHF since they're used in both cases
-            # model_choices = list(model_registry.hf_models.keys())
-            return [
-                gr.update(visible=False),  # hf_options - hide completely
-                gr.update(visible=False),  # groq_options
-                gr.update(visible=False),  # openai_options
-                gr.update(visible=False),  # cohere_options
-                gr.update(visible=True),   # glhf_options
-                gr.update(value=ctx_size), # context_size
-                gr.Dropdown(choices=[])    # not used
-            ]
-
-        # Default return for "Clipboard only" or other options
-        return [
-            gr.update(visible=False),  # hf_options
-            gr.update(visible=False),  # groq_options
-            gr.update(visible=False),  # openai_options
-            gr.update(visible=False),  # cohere_options
-            gr.update(visible=False),  # glhf_options
-            gr.update(value=4096),     # context_size
-            gr.Dropdown(choices=[])    # not used
-        ]

     # PDF Processing Handlers
     def handle_pdf_process(pdf, fmt, ctx_size):  # Remove md_eng parameter
@@ -1370,6 +1487,20 @@ with gr.Blocks(css="""
         outputs=[progress_status, generated_prompt, download_prompt]
     )

+
+    # copy_button.click(
+    #     fn=copy_to_clipboard,
+    #     inputs=[generated_prompt],
+    #     outputs=[clipboard_status]
+    # )
+
+    # copy_summary_btn.click(
+    #     fn=None,
+    #     inputs=[],
+    #     outputs=[],
+    #     _js=copy_summary_js
+    # )
+
     # Snippet handling
     snippet_selector.change(
         handle_snippet_selection,
@@ -1377,7 +1508,6 @@ with gr.Blocks(css="""
         outputs=[progress_status, generated_prompt, download_snippet]  # Connect download_snippet
     )

-    # Model selection
     # Model selection
     model_choice.change(
         handle_model_selection,
@@ -1389,7 +1519,9 @@ with gr.Blocks(css="""
             cohere_options,
             glhf_options,
             context_size,
-
+            send_to_model_btn,
+            hf_model,          # For updating model choices
+            hf_custom_model    # Add this to update custom model visibility
         ]
     )