kpal002 committed
Commit 15aee67
1 Parent(s): 34d00c4

Update RAG_utils.py

Files changed (1)
  1. RAG_utils.py +25 -63
RAG_utils.py CHANGED
@@ -618,80 +618,42 @@ class PDFQueryEngine:
             logger.error(f"Error during query engine setup: {e}")
             raise
 
-    # def evaluate_with_llm(self, reg_result: Any, peer_result: Any, guidelines_result: Any, queries: List[str]) -> Tuple[int, List[int], int, float, List[str]]:
-    #     """
-    #     Evaluate documents using a language model based on various criteria.
-    #     Args:
-    #         reg_result (Any): Result related to registration.
-    #         peer_result (Any): Result related to peer review.
-    #         guidelines_result (Any): Result related to following guidelines.
-    #         queries (List[str]): A list of queries to be processed.
-    #     Returns:
-    #         Tuple[int, List[int], int, float, List[str]]: A tuple containing the total score, a list of scores per criteria.
-    #     """
-
-    #     logger.info("Starting evaluation with LLM.")
-    #     self.config_manager.load_config("few_shot", "few_shot.json")
-    #     query_engine = self.setup_query_engine()
-
-    #     total_score = 0
-    #     criteria_met = 0
-    #     reasoning = []
+    def evaluate_with_llm(self, reg_result: Any, peer_result: Any, guidelines_result: Any, queries: List[str]) -> Tuple[int, List[int], int, float, List[str]]:
+        """
+        Evaluate documents using a language model based on various criteria.
+        Args:
+            reg_result (Any): Result related to registration.
+            peer_result (Any): Result related to peer review.
+            guidelines_result (Any): Result related to following guidelines.
+            queries (List[str]): A list of queries to be processed.
+        Returns:
+            Tuple[int, List[int], int, float, List[str]]: A tuple containing the total score, a list of scores per criteria.
+        """
 
-    #     for j, query in enumerate(queries):
-    #         # Handle special cases based on the value of j and other conditions
-    #         if j == 1 and reg_result:
-    #             extracted_data = {"score": 1, "reasoning": reg_result[0]}
-    #         elif j == 2 and guidelines_result:
-    #             extracted_data = {"score": 1, "reasoning": "The article is published in a journal following EQUATOR-NETWORK reporting guidelines"}
-    #         elif j == 8 and (guidelines_result or peer_result):
-    #             extracted_data = {"score": 1, "reasoning": "The article is published in a peer-reviewed journal."}
-    #         else:
-
-    #             # Execute the query
-    #             result = query_engine.query(query).response
-    #             extracted_data = self.base_utils.extract_score_reasoning(result)
-
-
-    #         # Validate and accumulate the scores
-    #         extracted_data_score = 0 if extracted_data.get("score") is None else int(extracted_data.get("score"))
-    #         if extracted_data_score > 0:
-    #             criteria_met += 1
-    #         reasoning.append(extracted_data["reasoning"])
-    #         total_score += extracted_data_score
-
-    #     score_percentage = (float(total_score) / len(queries)) * 100
-    #     logger.info("Evaluation completed.")
-    #     return total_score, criteria_met, score_percentage, reasoning
-
-    async def evaluate_with_llm_async(self, reg_result: Any, peer_result: Any, guidelines_result: Any, queries: List[str]) -> Tuple[int, List[int], int, float, List[str]]:
         logger.info("Starting evaluation with LLM.")
         self.config_manager.load_config("few_shot", "few_shot.json")
-        # Setup your query engine, if it's using aiohttp this is where you'd configure it
+        query_engine = self.setup_query_engine()
 
         total_score = 0
         criteria_met = 0
         reasoning = []
-
-        async def handle_query(session, j, query):
+
+        for j, query in enumerate(queries):
+            # Handle special cases based on the value of j and other conditions
             if j == 1 and reg_result:
-                return {"score": 1, "reasoning": reg_result[0]}
+                extracted_data = {"score": 1, "reasoning": reg_result[0]}
             elif j == 2 and guidelines_result:
-                return {"score": 1, "reasoning": "The article is published in a journal following EQUATOR-NETWORK reporting guidelines"}
+                extracted_data = {"score": 1, "reasoning": "The article is published in a journal following EQUATOR-NETWORK reporting guidelines"}
             elif j == 8 and (guidelines_result or peer_result):
-                return {"score": 1, "reasoning": "The article is published in a peer-reviewed journal."}
+                extracted_data = {"score": 1, "reasoning": "The article is published in a peer-reviewed journal."}
             else:
-                # Here, adapt your query engine or direct API call to use aiohttp
-                async with session.post('Your API Endpoint', json={'query': query}) as response:
-                    result = await response.json()
-                    return self.base_utils.extract_score_reasoning(result)
+
+                # Execute the query
+                result = query_engine.query(query).response
+                extracted_data = self.base_utils.extract_score_reasoning(result)
 
-        async with aiohttp.ClientSession() as session:
-            tasks = [handle_query(session, j, query) for j, query in enumerate(queries)]
-            results = await asyncio.gather(*tasks)
 
-        # Process results
-        for extracted_data in results:
+            # Validate and accumulate the scores
             extracted_data_score = 0 if extracted_data.get("score") is None else int(extracted_data.get("score"))
             if extracted_data_score > 0:
                 criteria_met += 1
@@ -700,8 +662,8 @@ class PDFQueryEngine:
 
         score_percentage = (float(total_score) / len(queries)) * 100
         logger.info("Evaluation completed.")
-        return total_score, criteria_met, len(queries), score_percentage, reasoning
-
+        return total_score, criteria_met, score_percentage, reasoning
+
 
 
 class MixtralLLM(CustomLLM):
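
For context, this change drops the aiohttp-based evaluate_with_llm_async (which posted each query to an external endpoint and gathered the responses with asyncio) in favor of a synchronous evaluate_with_llm that runs each criterion query through the engine built by setup_query_engine(). Below is a minimal standalone sketch of the same scoring loop, with the query engine and the score/reasoning parser stubbed out as plain callables; query_fn and extract_fn are illustrative stand-ins, not names from RAG_utils.py.

from typing import Any, Callable, Dict, List, Tuple

def evaluate_queries(
    queries: List[str],
    query_fn: Callable[[str], str],                # stand-in for query_engine.query(q).response
    extract_fn: Callable[[str], Dict[str, Any]],   # stand-in for base_utils.extract_score_reasoning
    reg_result: Any = None,
    peer_result: Any = None,
    guidelines_result: Any = None,
) -> Tuple[int, int, float, List[str]]:
    """Illustrative mirror of the scoring loop in the new evaluate_with_llm."""
    total_score = 0
    criteria_met = 0
    reasoning: List[str] = []
    for j, query in enumerate(queries):
        # The queries at index 1, 2, and 8 are short-circuited from precomputed
        # results, as in the committed code.
        if j == 1 and reg_result:
            extracted_data = {"score": 1, "reasoning": reg_result[0]}
        elif j == 2 and guidelines_result:
            extracted_data = {"score": 1, "reasoning": "The article is published in a journal following EQUATOR-NETWORK reporting guidelines"}
        elif j == 8 and (guidelines_result or peer_result):
            extracted_data = {"score": 1, "reasoning": "The article is published in a peer-reviewed journal."}
        else:
            extracted_data = extract_fn(query_fn(query))
        # A missing score counts as 0; any positive score marks the criterion as met.
        score = 0 if extracted_data.get("score") is None else int(extracted_data.get("score"))
        if score > 0:
            criteria_met += 1
        reasoning.append(extracted_data.get("reasoning", ""))
        total_score += score
    score_percentage = (float(total_score) / len(queries)) * 100
    return total_score, criteria_met, score_percentage, reasoning

The trade-off is that the queries now run sequentially rather than concurrently, so evaluation time grows with the number of criteria, but the method no longer depends on the placeholder HTTP endpoint ('Your API Endpoint') that the removed async version posted to.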