from typing import Dict, Any

from aiflows.base_flows import CompositeFlow
from aiflows.utils import logging
from .ControllerAtomicFlow import ControllerAtomicFlow
from aiflows.interfaces import KeyInterface

logging.set_verbosity_debug()
log = logging.get_logger(__name__)


class ControllerExecutorFlow(CompositeFlow):
    """This class implements a ControllerExecutorFlow. It's composed of a ControllerAtomicFlow and an ExecutorFlow.
    Typically, the ControllerAtomicFlow uses an LLM to decide which command to call, and the ExecutorFlow
    (a branching flow) is used to execute that command.

    It contains the following subflows:

    - A Controller Atomic Flow: a flow that decides which command to call next in order to get closer to
      accomplishing its given goal.
    - An Executor Flow: a branching flow that executes the command instructed by the ControllerAtomicFlow.

    An illustration of the flow is as follows:

        goal -----|-----> ControllerFlow ----->|-----> (answer, status)
                  ^                            |
                  |                            |
                  |                            v
                  |<----- ExecutorFlow <-------|

    *Configuration Parameters*:

    - `name` (str): The name of the flow. Default: "CtrlEx"
    - `description` (str): A description of the flow. This description is used to generate the help message of
      the flow. Default: "ControllerExecutor (i.e., MRKL, ReAct) interaction implementation with Flows that
      approaches the problem solving in two phases: one Flow chooses the next step and another Flow executes it.
      This is repeated until the controller Flow concludes on an answer."
    - `max_rounds` (int): The maximum number of rounds the flow can run for. Default: 30.
    - `subflows_config` (Dict[str,Any]): A dictionary of the subflows' configurations. Default:
        - `Controller`: The configuration of the Controller flow. By default, it's a ControllerAtomicFlow.
          Default parameters:
            - `finish` (Dict[str,Any]): The configuration of the finish command. Default parameters:
                - `description` (str): The description of the command.
                  Default: "Signal that the objective has been satisfied, and returns the answer to the user."
                - `input_args` (List[str]): The input arguments of the command. Default: ["answer"]
            - All other parameters are inherited from the default configuration of ControllerAtomicFlow
              (see ControllerAtomicFlow).
        - `Executor`: The configuration of the Executor flow. By default, it's a BranchingFlow. There are no
          default parameters; the flow parameter to be defined is:
            - `subflows_config` (Dict[str,Any]): A dictionary of the configurations of the subflows of the
              branching flow. These subflows are typically also the possible commands of the Controller flow.
              Default: []
    - `early_exit_key` (str): The key that is used to exit the flow. Default: "EARLY_EXIT"
    - `topology` (str): The topology of the flow, which is "circular". By default, the topology is the one shown
      in the illustration above (the topology is also described in ControllerExecutorFlow.yaml).

    *Input Interface*:

    - `goal` (str): The goal of the controller. Usually asked by the user/human
      (e.g. "I want to know the occupation and birth date of Michael Jordan.")

    *Output Interface*:

    - `answer` (str): The answer of the flow to the query
      (e.g. "Michael Jordan is a basketball player and businessman. He was born on February 17, 1963.")
    - `status` (str): The status of the flow. It can be "finished" or "unfinished". If the status is "unfinished",
      it's usually because the maximum number of rounds was reached before the model found an answer.

    :param flow_config: The configuration of the flow (see Configuration Parameters).
    :param subflows: A list of subflows.
        Required when instantiating the subflow programmatically (it replaces subflows_config from flow_config).
    """

    def __init__(self, flow_config: Dict[str, Any], subflows: Dict[str, Any] = None):
        super().__init__(flow_config, subflows)

    def set_up_flow_state(self):
        super().set_up_flow_state()

    def run(self, input_data):
        # The controller is first called with the initial input (the goal); in later
        # rounds it receives the observation produced by the previous command.
        executor_reply = input_data
        for _ in range(self.flow_config["max_rounds"]):
            controller_reply = self.ask_subflow("Controller", executor_reply).get_data()

            # The "finish" command signals that the controller has concluded on an answer.
            if controller_reply["command"] == "finish":
                return {
                    "EARLY_EXIT": True,
                    "answer": controller_reply["command_args"]["answer"],
                    "status": "finished",
                }

            # Otherwise, execute the chosen command and feed the resulting observation
            # back to the controller in the next round.
            executor_reply = {
                "observation": self.ask_subflow(
                    controller_reply["command"], controller_reply["command_args"]
                ).get_data()
            }

        return {
            "answer": "The maximum amount of rounds was reached before the model found an answer.",
            "status": "unfinished",
        }
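

# --- Illustrative usage sketch (not part of the flow implementation) ---
# This helper only demonstrates the input/output contract of `run` as described in the
# class docstring above; the name `_example_usage` and the pre-built `ctrl_ex_flow`
# instance it receives are assumptions for illustration. How the flow is actually
# constructed (e.g. from ControllerExecutorFlow.yaml with concrete Controller/Executor
# subflows) is outside the scope of this sketch.
def _example_usage(ctrl_ex_flow: ControllerExecutorFlow) -> None:
    # The input interface expects a "goal" key.
    input_data = {"goal": "I want to know the occupation and birth date of Michael Jordan."}
    reply = ctrl_ex_flow.run(input_data)
    if reply["status"] == "finished":
        # e.g. {"EARLY_EXIT": True, "answer": "...", "status": "finished"}
        log.info("Answer: %s", reply["answer"])
    else:
        # max_rounds was exhausted before the controller issued the "finish" command.
        log.info("No answer found: %s", reply["answer"])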