cstr committed on
Commit
c6a926f
·
verified ·
1 Parent(s): ebf5837

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -21
app.py CHANGED
@@ -529,20 +529,6 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
529
  model_id = f"hf:{glhf_custom_model}"
530
 
531
  summary = send_to_glhf(prompt, glhf_api_key, model_id, use_rate_limits)
532
-
533
- if not summary:
534
- return "Error: No response from model", None
535
-
536
- if not isinstance(summary, str):
537
- return "Error: Invalid response type from model", None
538
-
539
- # Create download file for valid responses
540
- if not summary.startswith("Error"):
541
- with tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.txt') as f:
542
- f.write(summary)
543
- return summary, f.name
544
-
545
- return summary, None
546
 
547
  else:
548
  return "Error: Invalid model selection", None
@@ -594,9 +580,8 @@ def send_to_hf_inference(prompt: str, model_name: str, api_key: str = None, use_
594
 
595
  return apply_rate_limit(_send, 16) if use_rate_limit else _send()
596
 
597
- def send_to_glhf(prompt: str, use_hf_model: bool, model_name: str, custom_model: str,
598
- api_key: str, use_rate_limit: bool = False) -> str:
599
- """Send prompt to GLHF API with model selection."""
600
  def _send():
601
  try:
602
  import openai
@@ -604,8 +589,6 @@ def send_to_glhf(prompt: str, use_hf_model: bool, model_name: str, custom_model:
604
  api_key=api_key,
605
  base_url="https://glhf.chat/api/openai/v1",
606
  )
607
-
608
- model_id = f"hf:{model_name if use_hf_model else custom_model}"
609
 
610
  # For GLHF, always use streaming for reliability
611
  completion = client.chat.completions.create(
@@ -1138,15 +1121,18 @@ with gr.Blocks(css="""
1138
  gr.Dropdown(choices=[]) # not used
1139
  ]
1140
  elif choice == "GLHF API":
 
 
1141
  return [
1142
- gr.update(visible=False), # hf_options
1143
  gr.update(visible=False), # groq_options
1144
  gr.update(visible=False), # openai_options
1145
  gr.update(visible=False), # cohere_options
1146
  gr.update(visible=True), # glhf_options
1147
  gr.update(value=ctx_size), # context_size
1148
- gr.Dropdown(choices=[]) # not used
1149
  ]
 
1150
 
1151
  # Default return for "Clipboard only" or other options
1152
  return [
 
529
  model_id = f"hf:{glhf_custom_model}"
530
 
531
  summary = send_to_glhf(prompt, glhf_api_key, model_id, use_rate_limits)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
532
 
533
  else:
534
  return "Error: Invalid model selection", None
 
580
 
581
  return apply_rate_limit(_send, 16) if use_rate_limit else _send()
582
 
583
+ def send_to_glhf(prompt: str, api_key: str, model_id: str, use_rate_limit: bool = False) -> str:
584
+ """Send prompt to GLHF API."""
 
585
  def _send():
586
  try:
587
  import openai
 
589
  api_key=api_key,
590
  base_url="https://glhf.chat/api/openai/v1",
591
  )
 
 
592
 
593
  # For GLHF, always use streaming for reliability
594
  completion = client.chat.completions.create(
 
1121
  gr.Dropdown(choices=[]) # not used
1122
  ]
1123
  elif choice == "GLHF API":
1124
+ # Always show HuggingFace models for GLHF since they're used in both cases
1125
+ model_choices = list(model_registry.hf_models.keys())
1126
  return [
1127
+ gr.update(visible=True), # hf_options - Keep visible for model selection
1128
  gr.update(visible=False), # groq_options
1129
  gr.update(visible=False), # openai_options
1130
  gr.update(visible=False), # cohere_options
1131
  gr.update(visible=True), # glhf_options
1132
  gr.update(value=ctx_size), # context_size
1133
+ gr.Dropdown(choices=model_choices, value="Mixtral 7B") # Use HF models
1134
  ]
1135
+
1136
 
1137
  # Default return for "Clipboard only" or other options
1138
  return [