cstr committed on
Commit
a8eafe1
·
verified ·
1 Parent(s): 28e0649

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +114 -22
app.py CHANGED
@@ -89,6 +89,20 @@ MODEL_CONTEXT_SIZES = {
89
  "llama-3.1-70b-specdec": 131072,
90
  "llama-3.2-1b-preview": 131072,
91
  "llama-3.2-3b-preview": 131072,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  }
93
  }
94
 
@@ -335,7 +349,9 @@ def build_prompts(snippets: List[str], prompt_instruction: str, custom_prompt: O
335
 
336
  def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
337
  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
338
- logging.info("send to model starting..")
 
 
339
 
340
  if not prompt or not prompt.strip():
341
  return "Error: No prompt provided", None
@@ -344,27 +360,69 @@ def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_
344
  logging.info("sending to model preparation.")
345
 
346
  # Basic input validation
347
- if model_selection not in ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT"]:
 
348
  return "Error: Invalid model selection", None
349
 
350
- # Model-specific validation - remove HF key check
351
  if model_selection == "Groq API" and not groq_api_key:
352
  return "Error: Groq API key required", None
353
  elif model_selection == "OpenAI ChatGPT" and not openai_api_key:
354
  return "Error: OpenAI API key required", None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
355
 
356
  def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
357
  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
358
  """Implementation of model sending with improved error handling."""
359
  logging.info("send to model impl commencing...")
360
 
361
- if model_selection == "Clipboard only":
362
- return "Text copied to clipboard. Use paste for processing.", None
363
-
364
  try:
 
 
 
365
  if model_selection == "HuggingFace Inference":
 
366
  model_id = hf_custom_model if hf_model_choice == "Custom Model" else model_registry.hf_models[hf_model_choice]
367
- summary = send_to_hf_inference(prompt, model_id, hf_api_key)
 
 
 
368
 
369
  elif model_selection == "Groq API":
370
  summary = send_to_groq(prompt, groq_model_choice, groq_api_key)
@@ -372,12 +430,13 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
372
  elif model_selection == "OpenAI ChatGPT":
373
  summary = send_to_openai(prompt, openai_api_key, model=openai_model_choice)
374
 
 
 
 
375
  else:
376
  return "Error: Invalid model selection", None
377
 
378
  # Validate response
379
- logging.info("model told us:", summary)
380
-
381
  if not summary or not isinstance(summary, str):
382
  return "Error: Invalid response from model", None
383
 
@@ -391,12 +450,29 @@ def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model
391
 
392
  except Exception as e:
393
  error_msg = str(e)
394
- if not error_msg: # Handle empty error messages
395
  error_msg = "Unknown error occurred"
396
  logging.error(f"Error in send_to_model_impl: {error_msg}")
397
  return f"Error: {error_msg}", None
398
-
399
  def send_to_hf_inference(prompt: str, model_name: str, api_key: str = None) -> str:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
400
  """Send prompt to HuggingFace Inference API with optional authentication."""
401
  try:
402
  # First try without authentication
@@ -451,17 +527,13 @@ def send_to_groq(prompt: str, model_name: str, api_key: str) -> str:
451
  return response.choices[0].message.content
452
  except Exception as e:
453
  logging.error(f"Groq API error: {e}")
454
- return f"Error with Groq API: {str(e)}"
455
 
456
  def send_to_openai(prompt: str, api_key: str, model: str = "gpt-3.5-turbo") -> str:
457
- """Send prompt to OpenAI API using the new v1.0+ client format."""
458
  try:
459
- from openai import OpenAI # Import the new client
460
-
461
- # Initialize the client with the API key
462
  client = OpenAI(api_key=api_key)
463
-
464
- # Create chat completion using new format
465
  response = client.chat.completions.create(
466
  model=model,
467
  messages=[
@@ -473,17 +545,37 @@ def send_to_openai(prompt: str, api_key: str, model: str = "gpt-3.5-turbo") -> s
473
  top_p=0.95
474
  )
475
 
476
- # Extract the response content
477
  if response.choices and len(response.choices) > 0:
478
  return response.choices[0].message.content
479
  else:
480
- return "Error: No response generated"
481
 
482
  except ImportError:
483
- return "Error: Please install the latest version of openai package (pip install --upgrade openai)"
484
  except Exception as e:
485
  logging.error(f"OpenAI API error: {e}")
486
- return f"Error with OpenAI API: {str(e)}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
487
 
488
  def copy_text_js(element_id: str) -> str:
489
  return f"""function() {{
 
89
  "llama-3.1-70b-specdec": 131072,
90
  "llama-3.2-1b-preview": 131072,
91
  "llama-3.2-3b-preview": 131072,
92
+ },
93
+ "Cohere API": {
94
+ "command-r-plus-08-2024": 131072, # 128k
95
+ "command-r-plus-04-2024": 131072,
96
+ "command-r-plus": 131072,
97
+ "command-r-08-2024": 131072,
98
+ "command-r-03-2024": 131072,
99
+ "command-r": 131072,
100
+ "command": 4096,
101
+ "command-nightly": 131072,
102
+ "command-light": 4096,
103
+ "command-light-nightly": 4096,
104
+ "c4ai-aya-expanse-8b": 8192,
105
+ "c4ai-aya-expanse-32b": 131072,
106
  }
107
  }
108
 
 
349
 
350
  def send_to_model(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
351
  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
352
+ """Wrapper function for send_to_model_impl with comprehensive error handling."""
353
+
354
+ logging.info("send to model starting...")
355
 
356
  if not prompt or not prompt.strip():
357
  return "Error: No prompt provided", None
 
360
  logging.info("sending to model preparation.")
361
 
362
  # Basic input validation
363
+ valid_selections = ["Clipboard only", "HuggingFace Inference", "Groq API", "OpenAI ChatGPT", "Cohere API"]
364
+ if model_selection not in valid_selections:
365
  return "Error: Invalid model selection", None
366
 
367
+ # Model-specific validation
368
  if model_selection == "Groq API" and not groq_api_key:
369
  return "Error: Groq API key required", None
370
  elif model_selection == "OpenAI ChatGPT" and not openai_api_key:
371
  return "Error: OpenAI API key required", None
372
+
373
+ # Call implementation with error handling
374
+ try:
375
+ logging.info("calling send_to_model_impl.")
376
+ summary, download_file = send_to_model_impl(
377
+ prompt=prompt.strip(),
378
+ model_selection=model_selection,
379
+ hf_model_choice=hf_model_choice,
380
+ hf_custom_model=hf_custom_model,
381
+ hf_api_key=hf_api_key,
382
+ groq_model_choice=groq_model_choice,
383
+ groq_api_key=groq_api_key,
384
+ openai_api_key=openai_api_key,
385
+ openai_model_choice=openai_model_choice
386
+ )
387
+ logging.info("summary received:", summary)
388
+
389
+ if summary is None or not isinstance(summary, str):
390
+ return "Error: No response from model", None
391
+
392
+ return summary, download_file
393
+
394
+ except Exception as impl_error:
395
+ error_msg = str(impl_error)
396
+ if not error_msg:
397
+ error_msg = "Unknown error occurred in model implementation"
398
+ logging.error(f"Model implementation error: {error_msg}")
399
+ return f"Error: {error_msg}", None
400
+
401
+ except Exception as e:
402
+ error_msg = str(e)
403
+ if not error_msg:
404
+ error_msg = "Unknown error occurred"
405
+ logging.error(f"Error in send_to_model: {error_msg}")
406
+ return f"Error: {error_msg}", None
407
+ finally:
408
+ logging.info("send to model completed.")
409
 
410
  def send_to_model_impl(prompt, model_selection, hf_model_choice, hf_custom_model, hf_api_key,
411
  groq_model_choice, groq_api_key, openai_api_key, openai_model_choice):
412
  """Implementation of model sending with improved error handling."""
413
  logging.info("send to model impl commencing...")
414
 
 
 
 
415
  try:
416
+ if model_selection == "Clipboard only":
417
+ return "Text copied to clipboard. Use paste for processing.", None
418
+
419
  if model_selection == "HuggingFace Inference":
420
+ # First try without API key
421
  model_id = hf_custom_model if hf_model_choice == "Custom Model" else model_registry.hf_models[hf_model_choice]
422
+ summary = send_to_hf_inference(prompt, model_id)
423
+ if summary.startswith("Error"):
424
+ if hf_api_key: # If first try failed and we have an API key, try with it
425
+ summary = send_to_hf_inference(prompt, model_id, hf_api_key)
426
 
427
  elif model_selection == "Groq API":
428
  summary = send_to_groq(prompt, groq_model_choice, groq_api_key)
 
430
  elif model_selection == "OpenAI ChatGPT":
431
  summary = send_to_openai(prompt, openai_api_key, model=openai_model_choice)
432
 
433
+ elif model_selection == "Cohere API":
434
+ summary = send_to_cohere(prompt)
435
+
436
  else:
437
  return "Error: Invalid model selection", None
438
 
439
  # Validate response
 
 
440
  if not summary or not isinstance(summary, str):
441
  return "Error: Invalid response from model", None
442
 
 
450
 
451
  except Exception as e:
452
  error_msg = str(e)
453
+ if not error_msg:
454
  error_msg = "Unknown error occurred"
455
  logging.error(f"Error in send_to_model_impl: {error_msg}")
456
  return f"Error: {error_msg}", None
457
+
458
def send_to_hf_inference(prompt: str, model_name: str, api_key: str = None) -> str:
    """Send prompt to HuggingFace Inference API with optional authentication.

    Returns the generated text on success, or an
    "Error with HuggingFace inference: ..." string on any failure so the
    caller can decide whether to retry (e.g. with an API key).
    """
    try:
        # Authenticated client only when a key was supplied.
        if api_key:
            client = InferenceClient(token=api_key)
        else:
            client = InferenceClient()
        generated = client.text_generation(
            prompt,
            model=model_name,
            max_new_tokens=500,
            temperature=0.7,
            top_p=0.95,
            repetition_penalty=1.1,
        )
        return str(generated)
    except Exception as exc:
        # Callers inspect the "Error" prefix instead of handling exceptions.
        logging.error(f"HuggingFace inference error: {exc}")
        return f"Error with HuggingFace inference: {str(exc)}"
474
+
475
+ def send_to_hf_inference_old(prompt: str, model_name: str, api_key: str = None) -> str:
476
  """Send prompt to HuggingFace Inference API with optional authentication."""
477
  try:
478
  # First try without authentication
 
527
  return response.choices[0].message.content
528
  except Exception as e:
529
  logging.error(f"Groq API error: {e}")
530
+ raise # Re-raise to be handled by caller
531
 
532
  def send_to_openai(prompt: str, api_key: str, model: str = "gpt-3.5-turbo") -> str:
533
+ """Send prompt to OpenAI API."""
534
  try:
535
+ from openai import OpenAI
 
 
536
  client = OpenAI(api_key=api_key)
 
 
537
  response = client.chat.completions.create(
538
  model=model,
539
  messages=[
 
545
  top_p=0.95
546
  )
547
 
 
548
  if response.choices and len(response.choices) > 0:
549
  return response.choices[0].message.content
550
  else:
551
+ raise Exception("No response generated")
552
 
553
  except ImportError:
554
+ raise Exception("Please install the latest version of openai package (pip install --upgrade openai)")
555
  except Exception as e:
556
  logging.error(f"OpenAI API error: {e}")
557
+ raise # Re-raise to be handled by caller
558
+
559
def send_to_cohere(prompt: str, api_key: str = None) -> str:
    """Send prompt to Cohere API with optional authentication.

    Returns the chat response text, or an "Error ..." string on any
    failure (missing package, bad key, network problem) instead of raising.
    """
    try:
        # Imported lazily so the app still loads when cohere isn't installed.
        import cohere

        if api_key:
            client = cohere.Client(api_key)
        else:
            client = cohere.Client()

        reply = client.chat(
            message=prompt,
            temperature=0.7,
            max_tokens=500,
        )

        # Guard against SDK responses without a text payload.
        if not hasattr(reply, 'text'):
            return "Error: No response text from Cohere"
        return reply.text

    except Exception as exc:
        logging.error(f"Cohere API error: {exc}")
        return f"Error with Cohere API: {str(exc)}"
579
 
580
  def copy_text_js(element_id: str) -> str:
581
  return f"""function() {{