nbaldwin commited on
Commit
f56bea9
1 Parent(s): 1015935

readme and demo

Browse files
Files changed (6) hide show
  1. AutoGPTFlow.py +115 -2
  2. README.md +158 -12
  3. __init__.py +3 -3
  4. AutoGPT.yaml → demo.yaml +7 -0
  5. pip_requirements.txt +8 -1
  6. run.py +16 -11
AutoGPTFlow.py CHANGED
@@ -12,7 +12,67 @@ from flow_modules.aiflows.ControllerExecutorFlowModule import ControllerAtomicFl
12
  from flow_modules.aiflows.VectorStoreFlowModule import ChromaDBFlow
13
 
14
  class AutoGPTFlow(CircularFlow):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
  def _on_reach_max_round(self):
 
16
  self._state_update_dict({
17
  "answer": "The maximum amount of rounds was reached before the model found an answer.",
18
  "status": "unfinished"
@@ -20,6 +80,13 @@ class AutoGPTFlow(CircularFlow):
20
 
21
  @staticmethod
22
  def _get_memory_key(flow_state):
 
 
 
 
 
 
 
23
  goal = flow_state.get("goal")
24
  last_command = flow_state.get("command")
25
  last_command_args = flow_state.get("command_args")
@@ -53,7 +120,16 @@ class AutoGPTFlow(CircularFlow):
53
 
54
  @CircularFlow.input_msg_payload_builder
55
  def prepare_memory_read_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
56
- """A (very) basic example implementation of how the memory retrieval could be constructed."""
 
 
 
 
 
 
 
 
 
57
  query = self._get_memory_key(flow_state)
58
 
59
  return {
@@ -63,12 +139,30 @@ class AutoGPTFlow(CircularFlow):
63
 
64
  @CircularFlow.output_msg_payload_processor
65
  def prepare_memory_read_output(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow):
 
 
 
 
 
 
 
 
 
66
  retrieved_memories = output_payload["retrieved"][0][1:]
67
  return {"memory": "\n".join(retrieved_memories)}
68
 
69
  @CircularFlow.input_msg_payload_builder
70
  def prepare_memory_write_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
71
- """An (very) example (not optimized) implementation of how the memory population could be implemented."""
 
 
 
 
 
 
 
 
 
72
  query = self._get_memory_key(flow_state)
73
 
74
  return {
@@ -79,6 +173,15 @@ class AutoGPTFlow(CircularFlow):
79
  @CircularFlow.output_msg_payload_processor
80
  def detect_finish_or_continue(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[
81
  str, Any]:
 
 
 
 
 
 
 
 
 
82
  command = output_payload["command"]
83
  if command == "finish":
84
  return {
@@ -92,6 +195,16 @@ class AutoGPTFlow(CircularFlow):
92
  @CircularFlow.output_msg_payload_processor
93
  def detect_finish_in_human_input(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[
94
  str, Any]:
 
 
 
 
 
 
 
 
 
 
95
  human_feedback = output_payload["human_input"]
96
  if human_feedback.strip().lower() == "q":
97
  return {
 
12
  from flow_modules.aiflows.VectorStoreFlowModule import ChromaDBFlow
13
 
14
  class AutoGPTFlow(CircularFlow):
15
+ """ This class implements a (very basic) AutoGPT flow. It is a flow that consists of multiple sub-flows that are executed circularly. It Contains the following subflows:
16
+
17
+ - A Controller Flow: A Flow that controls which subflow of the Executor Flow to execute next.
18
+ - A Memory Flow: A Flow used to save and retrieve messages or memories which might be useful for the Controller Flow.
19
+ - A HumanFeedback Flow: A flow used to get feedback from the user/human.
20
+ - An Executor Flow: A Flow that executes commands generated by the Controller Flow. Typically it's a branching flow (see BranchingFlow) and the commands are which branch to execute next.
21
+
22
+ An illustration of the flow is as follows:
23
+
24
+ | -------> Memory Flow -------> Controller Flow ------->|
25
+ ^ |
26
+ | |
27
+ | v
28
+ | <----- HumanFeedback Flow <------- Executor Flow <----|
29
+
30
+ *Configuration Parameters*:
31
+
32
+ - `name` (str): The name of the flow. Default is "AutoGPTFlow".
33
+ - `description` (str): A description of the flow. Default is "An example implementation of AutoGPT with Flows."
34
+ - `max_rounds` (int): The maximum number of rounds the circular flow can run for. Default is 30.
35
+ - `early_exit_key` (str): The key that is used to terminate the flow early. Default is "EARLY_EXIT".
36
+ - `subflows_config` (Dict[str,Any]): A dictionary of subflows configurations. Default:
37
+ - `Controller` (Dict[str,Any]): The configuration of the Controller Flow. By default the controller flow is a ControllerAtomicFlow (see ControllerExecutorFlowModule). It's default values are
38
+ defined in ControllerAtomicFlow.yaml of the ControllerExecutorFlowModule. Except for the following parameters who are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
39
+ - `finish` (Dict[str,Any]): The configuration of the finish command (used to terminate the flow early when the controller has accomplished its goal).
40
+ - `description` (str): The description of the command. Default is "The finish command is used to terminate the flow early when the controller has accomplished its goal."
41
+ - `input_args` (List[str]): The list of expected keys to run the finish command. Default is ["answer"].
42
+ - `human_message_prompt_template`(Dict[str,Any]): The prompt template used to generate the message that is shown to the user/human when the finish command is executed. Default is:
43
+ - `template` (str): The template of the human message prompt (see AutoGPTFlow.yaml for default template)
44
+ - `input_variables` (List[str]): The list of variables to be included in the template. Default is ["observation", "human_feedback", "memory"].
45
+ - `input_interface_initialized` (List[str]): The input interface that Controller Flow expects except for the first time in the flow. Default is ["observation", "human_feedback", "memory"].
46
+ - `Executor` (Dict[str,Any]): The configuration of the Executor Flow. By default the executor flow is a Branching Flow (see BranchingFlow). It's default values are the default values of the BranchingFlow. Fields to define:
47
+ - `subflows_config` (Dict[str,Any]): A Dictionary of subflows configurations.The keys are the names of the subflows and the values are the configurations of the subflows. Each subflow is a branch of the branching flow.
48
+ - `HumanFeedback` (Dict[str,Any]): The configuration of the HumanFeedback Flow. By default the human feedback flow is a HumanStandardInputFlow (see HumanStandardInputFlowModule ).
49
+ Its default values are specified in the README.md of HumanStandardInputFlowModule. Except for the following parameters which are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
50
+ - `request_multi_line_input_flag` (bool): Flag to request multi-line input. Default is False.
51
+ - `query_message_prompt_template` (Dict[str,Any]): The prompt template presented to the user/human to request input. Default is:
52
+ - `template` (str): The template of the query message prompt (see AutoGPTFlow.yaml for default template)
53
+ - `input_variables` (List[str]): The list of variables to be included in the template. Default is ["goal","command","command_args","observation"]
54
+ - input_interface_initialized (List[str]): The input interface that HumanFeedback Flow expects except for the first time in the flow. Default is ["goal","command","command_args","observation"]
55
+ - `Memory` (Dict[str,Any]): The configuration of the Memory Flow. By default the memory flow is a ChromaDBFlow (see VectorStoreFlowModule). It's default values are defined in ChromaDBFlow.yaml of the VectorStoreFlowModule. Except for the following parameters who are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
56
+ - `n_results`: The number of results to retrieve from the memory. Default is 2.
57
+ - `topology` (List[Dict[str,Any]]): The topology of the flow which is "circular". By default, the topology is the one shown in the illustration above (the topology is also described in AutoGPTFlow.yaml).
58
+
59
+
60
+ *Input Interface*:
61
+
62
+ - `goal` (str): The goal of the flow.
63
+
64
+ *Output Interface*:
65
+
66
+ - `answer` (str): The answer of the flow.
67
+ - `status` (str): The status of the flow. It can be "finished" or "unfinished".
68
+
69
+ :param flow_config: The configuration of the flow. Contains the parameters described above and the parameters required by the parent class (CircularFlow).
70
+ :type flow_config: Dict[str,Any]
71
+ :param subflows: A list of subflows constituting the circular flow. Required when instantiating the subflow programmatically (it replaces subflows_config from flow_config).
72
+ :type subflows: List[Flow]
73
+ """
74
  def _on_reach_max_round(self):
75
+ """ This method is called when the flow reaches the max_rounds."""
76
  self._state_update_dict({
77
  "answer": "The maximum amount of rounds was reached before the model found an answer.",
78
  "status": "unfinished"
 
80
 
81
  @staticmethod
82
  def _get_memory_key(flow_state):
83
+ """ This method returns the memory key that is used to retrieve memories from the ChromaDB model.
84
+
85
+ :param flow_state: The state of the flow
86
+ :type flow_state: Dict[str, Any]
87
+ :return: The current context
88
+ :rtype: str
89
+ """
90
  goal = flow_state.get("goal")
91
  last_command = flow_state.get("command")
92
  last_command_args = flow_state.get("command_args")
 
120
 
121
  @CircularFlow.input_msg_payload_builder
122
  def prepare_memory_read_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
123
+ """ This method prepares the input for the Memory Flow. It is called before the Memory Flow is called.
124
+ A (very) basic example implementation of how the memory retrieval could be constructed.
125
+
126
+ :param flow_state: The state of the flow
127
+ :type flow_state: Dict[str, Any]
128
+ :param dst_flow: The destination flow
129
+ :type dst_flow: Flow
130
+ :return: The input message for the Memory Flow
131
+ :rtype: Dict[str, Any]
132
+ """
133
  query = self._get_memory_key(flow_state)
134
 
135
  return {
 
139
 
140
  @CircularFlow.output_msg_payload_processor
141
  def prepare_memory_read_output(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow):
142
+ """ This method processes the output of the Memory Flow. It is called after the Memory Flow is called.
143
+
144
+ :param output_payload: The output payload of the Memory Flow
145
+ :type output_payload: Dict[str, Any]
146
+ :param src_flow: The source flow
147
+ :type src_flow: Flow
148
+ :return: The processed output payload
149
+ :rtype: Dict[str, Any]
150
+ """
151
  retrieved_memories = output_payload["retrieved"][0][1:]
152
  return {"memory": "\n".join(retrieved_memories)}
153
 
154
  @CircularFlow.input_msg_payload_builder
155
  def prepare_memory_write_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
156
+ """ This method prepares the input for the Memory Flow. It is called before the Memory Flow is called.
157
+ A (very) basic example implementation of how the memory population could be constructed.
158
+
159
+ :param flow_state: The state of the flow
160
+ :type flow_state: Dict[str, Any]
161
+ :param dst_flow: The destination flow
162
+ :type dst_flow: Flow
163
+ :return: The input message to write the Memory Flow
164
+ :rtype: Dict[str, Any]
165
+ """
166
  query = self._get_memory_key(flow_state)
167
 
168
  return {
 
173
  @CircularFlow.output_msg_payload_processor
174
  def detect_finish_or_continue(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[
175
  str, Any]:
176
+ """ This method detects whether the Controller flow has generated a "finish" command or not to terminate the flow. It is called after the Controller Flow is called.
177
+
178
+ :param output_payload: The output payload of the Controller Flow
179
+ :type output_payload: Dict[str, Any]
180
+ :param src_flow: The source flow
181
+ :type src_flow: Flow
182
+ :return: The processed output payload
183
+ :rtype: Dict[str, Any]
184
+ """
185
  command = output_payload["command"]
186
  if command == "finish":
187
  return {
 
195
  @CircularFlow.output_msg_payload_processor
196
  def detect_finish_in_human_input(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[
197
  str, Any]:
198
+ """ This method detects whether the HumanFeedback (the human/user) flow has generated a "finish" command or not to terminate the flow. It is called after the HumanFeedback Flow is called.
199
+
200
+ :param output_payload: The output payload of the HumanFeedback Flow
201
+ :type output_payload: Dict[str, Any]
202
+ :param src_flow: The source flow
203
+ :type src_flow: Flow
204
+ :return: The processed output payload
205
+ :rtype: Dict[str, Any]
206
+ """
207
+
208
  human_feedback = output_payload["human_input"]
209
  if human_feedback.strip().lower() == "q":
210
  return {
README.md CHANGED
@@ -1,26 +1,172 @@
1
  ---
2
  license: mit
3
  ---
4
- ToDo:
5
 
6
- ## Description
7
 
8
- &lt; Flow description &gt;
 
 
 
 
 
9
 
10
- ## Configuration parameters
11
 
12
- &lt; Name 1 &gt; (&lt; Type 1 &gt;): &lt; Description 1 &gt;. Required parameter.
13
 
14
- &lt; Name 2 &gt; (&lt; Type 2 &gt;): &lt; Description 2 &gt;. Default value is: &lt; value 2 &gt;
15
 
16
- ## Input interface
17
 
18
- &lt; Name 1 &gt; (&lt; Type 1 &gt;): &lt; Description 1 &gt;.
 
 
19
 
20
- (Note that the interface might depend on the state of the Flow.)
21
 
22
- ## Output interface
 
 
 
23
 
24
- &lt; Name 1 &gt; (&lt; Type 1 &gt;): &lt; Description 1 &gt;.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- (Note that the interface might depend on the state of the Flow.)
 
1
  ---
2
  license: mit
3
  ---
 
4
 
5
+ # Table of Contents
6
 
7
+ * [AutoGPTFlow](#AutoGPTFlow)
8
+ * [AutoGPTFlow](#AutoGPTFlow.AutoGPTFlow)
9
+ * [prepare\_memory\_read\_input](#AutoGPTFlow.AutoGPTFlow.prepare_memory_read_input)
10
+ * [prepare\_memory\_read\_output](#AutoGPTFlow.AutoGPTFlow.prepare_memory_read_output)
11
+ * [detect\_finish\_or\_continue](#AutoGPTFlow.AutoGPTFlow.detect_finish_or_continue)
12
+ * [detect\_finish\_in\_human\_input](#AutoGPTFlow.AutoGPTFlow.detect_finish_in_human_input)
13
 
14
+ <a id="AutoGPTFlow"></a>
15
 
16
+ # AutoGPTFlow
17
 
18
+ <a id="AutoGPTFlow.AutoGPTFlow"></a>
19
 
20
+ ## AutoGPTFlow Objects
21
 
22
+ ```python
23
+ class AutoGPTFlow(CircularFlow)
24
+ ```
25
 
26
+ This class implements a (very basic) AutoGPT flow. It is a flow that consists of multiple sub-flows that are executed circularly. It Contains the following subflows:
27
 
28
+ - A Controller Flow: A Flow that controls which subflow of the Executor Flow to execute next.
29
+ - A Memory Flow: A Flow used to save and retrieve messages or memories which might be useful for the Controller Flow.
30
+ - A HumanFeedback Flow: A flow used to get feedback from the user/human.
31
+ - An Executor Flow: A Flow that executes commands generated by the Controller Flow. Typically it's a branching flow (see BranchingFlow) and the commands are which branch to execute next.
32
 
33
+ An illustration of the flow is as follows:
34
+
35
+ | -------> Memory Flow -------> Controller Flow ------->|
36
+ ^ |
37
+ | |
38
+ | v
39
+ | <----- HumanFeedback Flow <------- Executor Flow <----|
40
+
41
+ *Configuration Parameters*:
42
+
43
+ - `name` (str): The name of the flow. Default is "AutoGPTFlow".
44
+ - `description` (str): A description of the flow. Default is "An example implementation of AutoGPT with Flows."
45
+ - `max_rounds` (int): The maximum number of rounds the circular flow can run for. Default is 30.
46
+ - `early_exit_key` (str): The key that is used to terminate the flow early. Default is "EARLY_EXIT".
47
+ - `subflows_config` (Dict[str,Any]): A dictionary of subflows configurations. Default:
48
+ - `Controller` (Dict[str,Any]): The configuration of the Controller Flow. By default the controller flow is a ControllerAtomicFlow (see ControllerExecutorFlowModule). It's default values are
49
+ defined in ControllerAtomicFlow.yaml of the ControllerExecutorFlowModule. Except for the following parameters who are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
50
+ - `finish` (Dict[str,Any]): The configuration of the finish command (used to terminate the flow early when the controller has accomplished its goal).
51
+ - `description` (str): The description of the command. Default is "The finish command is used to terminate the flow early when the controller has accomplished its goal."
52
+ - `input_args` (List[str]): The list of expected keys to run the finish command. Default is ["answer"].
53
+ - `human_message_prompt_template`(Dict[str,Any]): The prompt template used to generate the message that is shown to the user/human when the finish command is executed. Default is:
54
+ - `template` (str): The template of the human message prompt (see AutoGPTFlow.yaml for default template)
55
+ - `input_variables` (List[str]): The list of variables to be included in the template. Default is ["observation", "human_feedback", "memory"].
56
+ - `input_interface_initialized` (List[str]): The input interface that Controller Flow expects except for the first time in the flow. Default is ["observation", "human_feedback", "memory"].
57
+ - `Executor` (Dict[str,Any]): The configuration of the Executor Flow. By default the executor flow is a Branching Flow (see BranchingFlow). It's default values are the default values of the BranchingFlow. Fields to define:
58
+ - `subflows_config` (Dict[str,Any]): A Dictionary of subflows configurations.The keys are the names of the subflows and the values are the configurations of the subflows. Each subflow is a branch of the branching flow.
59
+ - `HumanFeedback` (Dict[str,Any]): The configuration of the HumanFeedback Flow. By default the human feedback flow is a HumanStandardInputFlow (see HumanStandardInputFlowModule ).
60
+ Its default values are specified in the README.md of HumanStandardInputFlowModule. Except for the following parameters which are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
61
+ - `request_multi_line_input_flag` (bool): Flag to request multi-line input. Default is False.
62
+ - `query_message_prompt_template` (Dict[str,Any]): The prompt template presented to the user/human to request input. Default is:
63
+ - `template` (str): The template of the query message prompt (see AutoGPTFlow.yaml for default template)
64
+ - `input_variables` (List[str]): The list of variables to be included in the template. Default is ["goal","command","command_args","observation"]
65
+ - input_interface_initialized (List[str]): The input interface that HumanFeedback Flow expects except for the first time in the flow. Default is ["goal","command","command_args","observation"]
66
+ - `Memory` (Dict[str,Any]): The configuration of the Memory Flow. By default the memory flow is a ChromaDBFlow (see VectorStoreFlowModule). It's default values are defined in ChromaDBFlow.yaml of the VectorStoreFlowModule. Except for the following parameters who are overwritten by the AutoGPTFlow in AutoGPTFlow.yaml:
67
+ - `n_results`: The number of results to retrieve from the memory. Default is 2.
68
+ - `topology` (List[Dict[str,Any]]): The topology of the flow which is "circular". By default, the topology is the one shown in the illustration above (the topology is also described in AutoGPTFlow.yaml).
69
+
70
+
71
+ *Input Interface*:
72
+
73
+ - `goal` (str): The goal of the flow.
74
+
75
+ *Output Interface*:
76
+
77
+ - `answer` (str): The answer of the flow.
78
+ - `status` (str): The status of the flow. It can be "finished" or "unfinished".
79
+
80
+ **Arguments**:
81
+
82
+ - `flow_config` (`Dict[str,Any]`): The configuration of the flow. Contains the parameters described above and the parameters required by the parent class (CircularFlow).
83
+ - `subflows` (`List[Flow]`): A list of subflows constituting the circular flow. Required when instantiating the subflow programmatically (it replaces subflows_config from flow_config).
84
+
85
+ <a id="AutoGPTFlow.AutoGPTFlow.prepare_memory_read_input"></a>
86
+
87
+ #### prepare\_memory\_read\_input
88
+
89
+ ```python
90
+ @CircularFlow.input_msg_payload_builder
91
+ def prepare_memory_read_input(flow_state: Dict[str, Any],
92
+ dst_flow: ChromaDBFlow) -> Dict[str, Any]
93
+ ```
94
+
95
+ This method prepares the input for the Memory Flow. It is called before the Memory Flow is called.
96
+
97
+ A (very) basic example implementation of how the memory retrieval could be constructed.
98
+
99
+ **Arguments**:
100
+
101
+ - `flow_state` (`Dict[str, Any]`): The state of the flow
102
+ - `dst_flow` (`Flow`): The destination flow
103
+
104
+ **Returns**:
105
+
106
+ `Dict[str, Any]`: The input message for the Memory Flow
107
+
108
+ <a id="AutoGPTFlow.AutoGPTFlow.prepare_memory_read_output"></a>
109
+
110
+ #### prepare\_memory\_read\_output
111
+
112
+ ```python
113
+ @CircularFlow.output_msg_payload_processor
114
+ def prepare_memory_read_output(output_payload: Dict[str, Any],
115
+ src_flow: ControllerAtomicFlow)
116
+ ```
117
+
118
+ This method processes the output of the Memory Flow. It is called after the Memory Flow is called.
119
+
120
+ **Arguments**:
121
+
122
+ - `output_payload` (`Dict[str, Any]`): The output payload of the Memory Flow
123
+ - `src_flow` (`Flow`): The source flow
124
+
125
+ **Returns**:
126
+
127
+ `Dict[str, Any]`: The processed output payload
128
+
129
+ <a id="AutoGPTFlow.AutoGPTFlow.detect_finish_or_continue"></a>
130
+
131
+ #### detect\_finish\_or\_continue
132
+
133
+ ```python
134
+ @CircularFlow.output_msg_payload_processor
135
+ def detect_finish_or_continue(
136
+ output_payload: Dict[str, Any],
137
+ src_flow: ControllerAtomicFlow) -> Dict[str, Any]
138
+ ```
139
+
140
+ This method detects whether the Controller flow has generated a "finish" command or not to terminate the flow. It is called after the Controller Flow is called.
141
+
142
+ **Arguments**:
143
+
144
+ - `output_payload` (`Dict[str, Any]`): The output payload of the Controller Flow
145
+ - `src_flow` (`Flow`): The source flow
146
+
147
+ **Returns**:
148
+
149
+ `Dict[str, Any]`: The processed output payload
150
+
151
+ <a id="AutoGPTFlow.AutoGPTFlow.detect_finish_in_human_input"></a>
152
+
153
+ #### detect\_finish\_in\_human\_input
154
+
155
+ ```python
156
+ @CircularFlow.output_msg_payload_processor
157
+ def detect_finish_in_human_input(
158
+ output_payload: Dict[str, Any],
159
+ src_flow: ControllerAtomicFlow) -> Dict[str, Any]
160
+ ```
161
+
162
+ This method detects whether the HumanFeedback (the human/user) flow has generated a "finish" command or not to terminate the flow. It is called after the HumanFeedback Flow is called.
163
+
164
+ **Arguments**:
165
+
166
+ - `output_payload` (`Dict[str, Any]`): The output payload of the HumanFeedback Flow
167
+ - `src_flow` (`Flow`): The source flow
168
+
169
+ **Returns**:
170
+
171
+ `Dict[str, Any]`: The processed output payload
172
 
 
__init__.py CHANGED
@@ -1,11 +1,11 @@
1
  # ~~~ Specify the dependencies ~~~
2
  dependencies = [
3
  {"url": "aiflows/ControllerExecutorFlowModule",
4
- "revision": "263bf393e165f42d3bc9792976b37c4cf221dd81"},
5
  {"url": "aiflows/HumanStandardInputFlowModule",
6
- "revision": "a690582584ff5345fe768e41558959a7e99bbeee"},
7
  {"url": "aiflows/VectorStoreFlowModule",
8
- "revision": "46d070566496a31ead631ef3f8e8ea2cd5f098f2"},
9
  ]
10
  from flows import flow_verse
11
 
 
1
  # ~~~ Specify the dependencies ~~~
2
  dependencies = [
3
  {"url": "aiflows/ControllerExecutorFlowModule",
4
+ "revision": "09cda9615e5c48ae18e2c1244519ed7321145187"},
5
  {"url": "aiflows/HumanStandardInputFlowModule",
6
+ "revision": "5683a922372c5fa90be9f6447d6662d8d80341fc"},
7
  {"url": "aiflows/VectorStoreFlowModule",
8
+ "revision": "692f5d1b55936d813d4f41e8b0ec11754c7da9ac"},
9
  ]
10
  from flows import flow_verse
11
 
AutoGPT.yaml → demo.yaml RENAMED
@@ -16,6 +16,8 @@ flow:
16
  finish:
17
  description: "Signal that the objective has been satisfied, and returns the answer to the user."
18
  input_args: [ "answer" ]
 
 
19
  human_message_prompt_template:
20
  template: |2-
21
  Here is the response to your last action:
@@ -38,3 +40,8 @@ flow:
38
  _target_: aiflows.LCToolFlowModule.LCToolFlow.instantiate_from_default_config
39
  backend:
40
  _target_: langchain.tools.DuckDuckGoSearchRun
 
 
 
 
 
 
16
  finish:
17
  description: "Signal that the objective has been satisfied, and returns the answer to the user."
18
  input_args: [ "answer" ]
19
+ backend:
20
+ api_infos: ???
21
  human_message_prompt_template:
22
  template: |2-
23
  Here is the response to your last action:
 
40
  _target_: aiflows.LCToolFlowModule.LCToolFlow.instantiate_from_default_config
41
  backend:
42
  _target_: langchain.tools.DuckDuckGoSearchRun
43
+
44
+ Memory:
45
+ backend:
46
+ api_infos: ???
47
+
pip_requirements.txt CHANGED
@@ -1 +1,8 @@
1
- duckduckgo-search==3.9.2
 
 
 
 
 
 
 
 
1
+ # LCToolFLowModule dependency. Needed if you want to run the demo
2
+ duckduckgo-search==3.9.6
3
+ # ControllerExecutorFlowModule dependency. Needed if you want to run the demo
4
+ wikipedia==1.4.0
5
+ # VectorStore FlowModule dependencies. Needed if you want to run the demo
6
+ langchain==0.0.336
7
+ chromadb==0.3.29
8
+ faiss-cpu==1.7.4
run.py CHANGED
@@ -3,7 +3,8 @@ import os
3
  import hydra
4
 
5
  import flows
6
- from flows.flow_launchers import FlowLauncher, ApiInfo
 
7
  from flows.utils.general_helpers import read_yaml_file
8
 
9
  from flows import logging
@@ -16,33 +17,38 @@ logging.set_verbosity_debug()
16
 
17
  dependencies = [
18
  {"url": "aiflows/AutoGPTFlowModule", "revision": os.getcwd()},
19
- {"url": "aiflows/LCToolFlowModule", "revision": "46dd24ecc3dc4f4f0191e57c202cc7d20e8e7782"},
20
  ]
21
  from flows import flow_verse
22
- flow_verse.sync_dependencies(dependencies)
23
 
 
24
  if __name__ == "__main__":
25
  # ~~~ Set the API information ~~~
26
  # OpenAI backend
27
- # api_information = ApiInfo("openai", os.getenv("OPENAI_API_KEY"))
 
28
  # Azure backend
29
- api_information = ApiInfo("azure", os.getenv("AZURE_OPENAI_KEY"), os.getenv("AZURE_OPENAI_ENDPOINT"))
 
 
 
30
 
31
  root_dir = "."
32
- cfg_path = os.path.join(root_dir, "AutoGPT.yaml")
33
  cfg = read_yaml_file(cfg_path)
34
-
 
35
  # ~~~ Instantiate the Flow ~~~
36
  flow_with_interfaces = {
37
  "flow": hydra.utils.instantiate(cfg['flow'], _recursive_=False, _convert_="partial"),
38
  "input_interface": (
39
  None
40
- if getattr(cfg, "input_interface", None) is None
41
  else hydra.utils.instantiate(cfg['input_interface'], _recursive_=False)
42
  ),
43
  "output_interface": (
44
  None
45
- if getattr(cfg, "output_interface", None) is None
46
  else hydra.utils.instantiate(cfg['output_interface'], _recursive_=False)
47
  ),
48
  }
@@ -50,7 +56,7 @@ if __name__ == "__main__":
50
  # ~~~ Get the data ~~~
51
  # data = {"id": 0, "goal": "Answer the following question: What is the population of Canada?"} # Uses wikipedia
52
  # data = {"id": 0, "goal": "Answer the following question: Who was the NBA champion in 2023?"} # Uses duckduckgo
53
- data = {"id": 0, "goal": "Answer the following question: What is the date of birth of Michael Jordan?"}
54
  # At first, we retrieve information about Michael Jordan the basketball player
55
  # If we provide feedback, only in the first round, that we are not interested in the basketball player,
56
  # but the statistician, and skip the feedback in the next rounds, we get the correct answer
@@ -63,7 +69,6 @@ if __name__ == "__main__":
63
  flow_with_interfaces=flow_with_interfaces,
64
  data=data,
65
  path_to_output_file=path_to_output_file,
66
- api_information=api_information,
67
  )
68
 
69
  # ~~~ Print the output ~~~
 
3
  import hydra
4
 
5
  import flows
6
+ from flows.flow_launchers import FlowLauncher
7
+ from flows.backends.api_info import ApiInfo
8
  from flows.utils.general_helpers import read_yaml_file
9
 
10
  from flows import logging
 
17
 
18
  dependencies = [
19
  {"url": "aiflows/AutoGPTFlowModule", "revision": os.getcwd()},
20
+ {"url": "aiflows/LCToolFlowModule", "revision": "f1020b23fe2a1ab6157c3faaf5b91b5cdaf02c1b"},
21
  ]
22
  from flows import flow_verse
 
23
 
24
+ flow_verse.sync_dependencies(dependencies)
25
  if __name__ == "__main__":
26
  # ~~~ Set the API information ~~~
27
  # OpenAI backend
28
+ api_information = [ApiInfo(backend_used="openai",
29
+ api_key = os.getenv("OPENAI_API_KEY"))]
30
  # Azure backend
31
+ # api_information = ApiInfo(backend_used = "azure",
32
+ # api_base = os.getenv("AZURE_API_BASE"),
33
+ # api_key = os.getenv("AZURE_OPENAI_KEY"),
34
+ # api_version = os.getenv("AZURE_API_VERSION") )
35
 
36
  root_dir = "."
37
+ cfg_path = os.path.join(root_dir, "demo.yaml")
38
  cfg = read_yaml_file(cfg_path)
39
+ cfg["flow"]["subflows_config"]["Controller"]["backend"]["api_infos"] = api_information
40
+ cfg["flow"]["subflows_config"]["Memory"]["backend"]["api_infos"] = api_information
41
  # ~~~ Instantiate the Flow ~~~
42
  flow_with_interfaces = {
43
  "flow": hydra.utils.instantiate(cfg['flow'], _recursive_=False, _convert_="partial"),
44
  "input_interface": (
45
  None
46
+ if cfg.get( "input_interface", None) is None
47
  else hydra.utils.instantiate(cfg['input_interface'], _recursive_=False)
48
  ),
49
  "output_interface": (
50
  None
51
+ if cfg.get( "output_interface", None) is None
52
  else hydra.utils.instantiate(cfg['output_interface'], _recursive_=False)
53
  ),
54
  }
 
56
  # ~~~ Get the data ~~~
57
  # data = {"id": 0, "goal": "Answer the following question: What is the population of Canada?"} # Uses wikipedia
58
  # data = {"id": 0, "goal": "Answer the following question: Who was the NBA champion in 2023?"} # Uses duckduckgo
59
+ data = {"id": 0, "goal": "Answer the following question: What is the profession and date of birth of Michael Jordan?"}
60
  # At first, we retrieve information about Michael Jordan the basketball player
61
  # If we provide feedback, only in the first round, that we are not interested in the basketball player,
62
  # but the statistician, and skip the feedback in the next rounds, we get the correct answer
 
69
  flow_with_interfaces=flow_with_interfaces,
70
  data=data,
71
  path_to_output_file=path_to_output_file,
 
72
  )
73
 
74
  # ~~~ Print the output ~~~