nbaldwin committed
Commit 30754bf
1 Parent(s): 24d2f36

coflows-compatible

Files changed (5)
  1. AutoGPTFlow.py +146 -101
  2. AutoGPTFlow.yaml +10 -57
  3. __init__.py +1 -1
  4. demo.yaml +38 -34
  5. run.py +56 -33
AutoGPTFlow.py CHANGED
@@ -1,17 +1,15 @@
 import sys
 from typing import Dict, Any
 
-from aiflows.base_flows import CircularFlow
 from aiflows.utils import logging
 
 logging.set_verbosity_debug()
 
 log = logging.get_logger(__name__)
-
-from flow_modules.aiflows.ControllerExecutorFlowModule import ControllerAtomicFlow
-from flow_modules.aiflows.VectorStoreFlowModule import ChromaDBFlow
-
-class AutoGPTFlow(CircularFlow):
+from aiflows.interfaces import KeyInterface
+from flow_modules.aiflows.ControllerExecutorFlowModule import ControllerExecutorFlow
 
+class AutoGPTFlow(ControllerExecutorFlow):
     """ This class implements a (very basic) AutoGPT flow. It is a flow that consists of multiple sub-flows that are executed circularly. It contains the following subflows:
 
     - A Controller Flow: A Flow that controls which subflow of the Executor Flow to execute next.
@@ -71,15 +69,136 @@ class AutoGPTFlow(CircularFlow):
     :param subflows: A list of subflows constituting the circular flow. Required when instantiating the subflow programmatically (it replaces subflows_config from flow_config).
     :type subflows: List[Flow]
     """
-    def _on_reach_max_round(self):
-        """ This method is called when the flow reaches the max_rounds."""
-        self._state_update_dict({
-            "answer": "The maximum amount of rounds was reached before the model found an answer.",
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.rename_human_output_interface = KeyInterface(
+            keys_to_rename={"human_input": "human_feedback"}
+        )
+
+    def set_up_flow_state(self):
+        super().set_up_flow_state()
+        self.flow_state["early_exit_flag"] = True
+        self.flow_state["is_first_round"] = True
+
+    def memory_read_step(self):
+        memory_read_input = self.prepare_memory_read_input()
+        output = self.ask_subflow("Memory", memory_read_input).get_data()
+        memory_read_output = self.prepare_memory_read_output(output)
+
+        return memory_read_output
+
+    def memory_write_step(self):
+        memory_write_input = self.prepare_memory_write_input()
+        self.tell_subflow("Memory", memory_write_input)
+
+    def controller_executor_step(self, output_memory_retrieval):
+
+        if self.flow_state["is_first_round"]:
+            additional_input_ctrl_ex = {
+                "goal": self.flow_state["goal"],
+            }
+        else:
+            additional_input_ctrl_ex = {
+                "observation": self.flow_state["observation"],
+                "human_feedback": self.flow_state["human_feedback"],
+            }
+
+        input_ctrl_ex = {"executor_reply": {**output_memory_retrieval, **additional_input_ctrl_ex}}
+
+        output_ctrl_ex = self._single_round_controller_executor(input_ctrl_ex)
+
+        self.flow_state["early_exit_flag"] = output_ctrl_ex.get("EARLY_EXIT", False)
+
+        if self.flow_state["early_exit_flag"]:
+            return output_ctrl_ex
+
+        controller_reply = output_ctrl_ex["controller_reply"]
+        executor_reply = output_ctrl_ex["executor_reply"]
+
+        self._state_update_dict(
+            {
+                "command": controller_reply["command"],
+                "command_args": controller_reply["command_args"],
+                "observation": executor_reply["observation"],
+            }
+        )
+        return output_ctrl_ex
+
+    def human_feedback_step(self):
+
+        human_feedback_input_variables = {
+            "goal": self.flow_state["goal"],
+            "command": self.flow_state["command"],
+            "command_args": self.flow_state["command_args"],
+            "observation": self.flow_state["observation"],
+        }
+
+        human_feedback = self.rename_human_output_interface(
+            self.ask_subflow("HumanFeedback", human_feedback_input_variables).get_data()
+        )
+
+        self.flow_state["human_feedback"] = human_feedback["human_feedback"]
+
+        if human_feedback["human_feedback"].strip().lower() == "q":
+            self.flow_state["early_exit_flag"] = True
+            return {
+                "EARLY_EXIT": True,
+                "answer": "The user has chosen to exit before a final answer was generated.",
+                "status": "unfinished",
+            }
+
+        return human_feedback
+
+    def _single_round_autogpt(self):
+
+        # 1. Memory Retrieval
+        output_memory_retrieval = self.memory_read_step()
+
+        # 2. ControllerExecutor
+        output_ctrl_ex = self.controller_executor_step(output_memory_retrieval)
+
+        if self.flow_state["early_exit_flag"]:
+            return output_ctrl_ex
+
+        # 3. HumanFeedback
+        output_human_feedback = self.human_feedback_step()
+
+        if self.flow_state["early_exit_flag"]:
+            return output_human_feedback
+
+        # 4. Memory Write
+        self.memory_write_step()
+
+        return {**output_ctrl_ex, **output_human_feedback}
+
+    def run(self, input_data):
+        self._state_update_dict({"goal": input_data["goal"]})
+
+        for round in range(self.flow_config["max_rounds"]):
+            output = self._single_round_autogpt()
+
+            self.flow_state["is_first_round"] = False
+
+            if self.flow_state["early_exit_flag"]:
+                return output
+
+        return {
+            "EARLY_EXIT": False,
+            "answer": output,
             "status": "unfinished"
-        })
-
-    @staticmethod
-    def _get_memory_key(flow_state):
+        }
+
+    def _get_memory_key(self):
         """ This method returns the memory key that is used to retrieve memories from the ChromaDB model.
 
         :param flow_state: The state of the flow
@@ -87,11 +206,11 @@ class AutoGPTFlow(CircularFlow):
         :return: The current context
         :rtype: str
         """
-        goal = flow_state.get("goal")
-        last_command = flow_state.get("command")
-        last_command_args = flow_state.get("command_args")
-        last_observation = flow_state.get("observation")
-        last_human_feedback = flow_state.get("human_feedback")
+        goal = self.flow_state.get("goal")
+        last_command = self.flow_state.get("command")
+        last_command_args = self.flow_state.get("command_args")
+        last_observation = self.flow_state.get("observation")
+        last_human_feedback = self.flow_state.get("human_feedback")
 
         if last_command is None:
             return ""
@@ -118,99 +237,25 @@ class AutoGPTFlow(CircularFlow):
 
         return current_context
 
-    @CircularFlow.input_msg_payload_builder
-    def prepare_memory_read_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
-        """ This method prepares the input for the Memory Flow. It is called before the Memory Flow is called.
-        A (very) basic example implementation of how the memory retrieval could be constructed.
-
-        :param flow_state: The state of the flow
-        :type flow_state: Dict[str, Any]
-        :param dst_flow: The destination flow
-        :type dst_flow: Flow
-        :return: The input message for the Memory Flow
-        :rtype: Dict[str, Any]
-        """
-        query = self._get_memory_key(flow_state)
+    def prepare_memory_read_input(self) -> Dict[str, Any]:
+
+        query = self._get_memory_key()
 
         return {
            "operation": "read",
            "content": query
        }
 
-    @CircularFlow.output_msg_payload_processor
-    def prepare_memory_read_output(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow):
-        """ This method processes the output of the Memory Flow. It is called after the Memory Flow is called.
-
-        :param output_payload: The output payload of the Memory Flow
-        :type output_payload: Dict[str, Any]
-        :param src_flow: The source flow
-        :type src_flow: Flow
-        :return: The processed output payload
-        :rtype: Dict[str, Any]
-        """
-        retrieved_memories = output_payload["retrieved"][0][1:]
+    def prepare_memory_read_output(self, data: Dict[str, Any]):
+
+        retrieved_memories = data["retrieved"][0][1:]
        return {"memory": "\n".join(retrieved_memories)}
 
-    @CircularFlow.input_msg_payload_builder
-    def prepare_memory_write_input(self, flow_state: Dict[str, Any], dst_flow: ChromaDBFlow) -> Dict[str, Any]:
-        """ This method prepares the input for the Memory Flow. It is called before the Memory Flow is called.
-        A (very) basic example implementation of how the memory population could be constructed.
-
-        :param flow_state: The state of the flow
-        :type flow_state: Dict[str, Any]
-        :param dst_flow: The destination flow
-        :type dst_flow: Flow
-        :return: The input message to write the Memory Flow
-        :rtype: Dict[str, Any]
-        """
-        query = self._get_memory_key(flow_state)
+    def prepare_memory_write_input(self) -> Dict[str, Any]:
+
+        query = self._get_memory_key()
 
        return {
            "operation": "write",
            "content": str(query)
        }
-
-    @CircularFlow.output_msg_payload_processor
-    def detect_finish_or_continue(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[str, Any]:
-        """ This method detects whether the Controller flow has generated a "finish" command or not to terminate the flow. It is called after the Controller Flow is called.
-
-        :param output_payload: The output payload of the Controller Flow
-        :type output_payload: Dict[str, Any]
-        :param src_flow: The source flow
-        :type src_flow: Flow
-        :return: The processed output payload
-        :rtype: Dict[str, Any]
-        """
-        command = output_payload["command"]
-        if command == "finish":
-            return {
-                "EARLY_EXIT": True,
-                "answer": output_payload["command_args"]["answer"],
-                "status": "finished"
-            }
-        else:
-            return output_payload
-
-    @CircularFlow.output_msg_payload_processor
-    def detect_finish_in_human_input(self, output_payload: Dict[str, Any], src_flow: ControllerAtomicFlow) -> Dict[str, Any]:
-        """ This method detects whether the HumanFeedback (the human/user) flow has generated a "finish" command or not to terminate the flow. It is called after the HumanFeedback Flow is called.
-
-        :param output_payload: The output payload of the HumanFeedback Flow
-        :type output_payload: Dict[str, Any]
-        :param src_flow: The source flow
-        :type src_flow: Flow
-        :return: The processed output payload
-        :rtype: Dict[str, Any]
-        """
-
-        human_feedback = output_payload["human_input"]
-        if human_feedback.strip().lower() == "q":
-            return {
-                "EARLY_EXIT": True,
-                "answer": "The user has chosen to exit before a final answer was generated.",
-                "status": "unfinished"
-            }
-
-        return {"human_feedback": human_feedback}
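
The net effect of this file's changes: the declarative CircularFlow topology is replaced by an explicit round loop (memory read, controller/executor, human feedback, memory write) guarded by an early-exit flag. A minimal standalone sketch of that control flow, with the aiflows subflow calls stubbed out as plain callables (all names below are illustrative, not part of the module):

    # Standalone sketch of the new round structure; the four steps are stub callables.
    def single_round(state, memory_read, controller_executor, human_feedback, memory_write):
        memories = memory_read(state)                     # 1. Memory retrieval
        ctrl_out = controller_executor(state, memories)   # 2. Controller + Executor
        if ctrl_out.get("EARLY_EXIT", False):
            return ctrl_out
        feedback = human_feedback(state)                  # 3. Human feedback ("q" quits)
        if feedback.get("EARLY_EXIT", False):
            return feedback
        memory_write(state)                               # 4. Memory write
        return {**ctrl_out, **feedback}

    def run(goal, max_rounds, **steps):
        state, output = {"goal": goal, "is_first_round": True}, {}
        for _ in range(max_rounds):
            output = single_round(state, **steps)
            state["is_first_round"] = False
            if output.get("EARLY_EXIT", False):
                return output
        return {"EARLY_EXIT": False, "answer": output, "status": "unfinished"}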
AutoGPTFlow.yaml CHANGED
@@ -43,18 +43,18 @@ subflows_config:
     - "human_feedback"
     - "memory"
 
-  Executor:
-    _target_: aiflows.base_flows.BranchingFlow.instantiate_from_default_config
-    # E.g.,
-    # subflows_config:
-    #   wiki_search:
-    #     _target_: .WikiSearchAtomicFlow.instantiate_from_default_config
-    #   ddg_search:
-    #     _target_: flows.application_flows.LCToolFlowModule.LCToolFlow.instantiate_from_default_config
-    #     backend:
-    #       _target_: langchain.tools.DuckDuckGoSearchRun
+
+  #   wiki_search:
+  #     _target_: .WikiSearchAtomicFlow.instantiate_from_default_config
+  #   ddg_search:
+  #     _target_: flows.application_flows.LCToolFlowModule.LCToolFlow.instantiate_from_default_config
+  #     backend:
+  #       _target_: langchain.tools.DuckDuckGoSearchRun
 
   HumanFeedback:
+    name: HumanFeedbackFlow
+    description: "A flow that requests feedback from a human."
     _target_: flow_modules.aiflows.HumanStandardInputFlowModule.HumanStandardInputFlow.instantiate_from_default_config
     request_multi_line_input_flag: False
     query_message_prompt_template:
@@ -88,50 +88,3 @@ subflows_config:
     _target_: flow_modules.aiflows.VectorStoreFlowModule.ChromaDBFlow.instantiate_from_default_config
     n_results: 2
 
-topology:
-  - goal: "Retrieve relevant information from memory."
-    input_interface:
-      _target_: AutoGPTFlow.prepare_memory_read_input # An interface defined as a function in the Flow class
-    flow: Memory
-    output_interface:
-      _target_: AutoGPTFlow.prepare_memory_read_output
-    reset: false
-
-  - goal: "Select the next action and prepare the input for the executor."
-    input_interface:
-      _target_: aiflows.interfaces.KeyInterface
-      additional_transformations:
-        - _target_: aiflows.data_transformations.KeyMatchInput
-    flow: Controller
-    output_interface:
-      _target_: AutoGPTFlow.detect_finish_or_continue # An interface defined as a function in the Flow class
-    reset: false
-
-  - goal: "Execute the action specified by the Controller."
-    input_interface:
-      _target_: aiflows.interfaces.KeyInterface
-      keys_to_rename:
-        command: branch
-        command_args: branch_input_data
-      keys_to_select: ["branch", "branch_input_data"]
-    flow: Executor
-    output_interface:
-      _target_: aiflows.interfaces.KeyInterface
-      keys_to_rename:
-        branch_output_data: observation
-      keys_to_select: ["observation"]
-    reset: false
-
-  - goal: "Ask the user for feedback."
-    input_interface:
-      _target_: aiflows.interfaces.KeyInterface
-    flow: HumanFeedback
-    output_interface:
-      _target_: AutoGPTFlow.detect_finish_in_human_input
-    reset: false
-
-  - goal: "Write relevant information to memory"
-    input_interface:
-      _target_: AutoGPTFlow.prepare_memory_write_input # An interface defined as a function in the Flow class
-    flow: Memory
-    reset: false
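
With the topology block gone, the YAML now only declares subflows; the orchestration order lives in AutoGPTFlow.py. A quick sanity check of the trimmed config, sketched with read_yaml_file from the same library (the relative path and the assertions are assumptions, not part of the commit):

    from aiflows.utils.general_helpers import read_yaml_file

    # Hypothetical check: the coflows config should carry no `topology` section,
    # and the `Executor` branching flow is no longer declared as a subflow.
    cfg = read_yaml_file("AutoGPTFlow.yaml")  # path assumed relative to the module
    assert "topology" not in cfg
    assert "Executor" not in cfg["subflows_config"]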
__init__.py CHANGED
@@ -1,7 +1,7 @@
 # ~~~ Specify the dependencies ~~~
 dependencies = [
     {"url": "aiflows/ControllerExecutorFlowModule",
-     "revision": "main"},
+     "revision": "coflows"},
     {"url": "aiflows/HumanStandardInputFlowModule",
      "revision": "main"},
     {"url": "aiflows/VectorStoreFlowModule",
demo.yaml CHANGED
@@ -1,41 +1,45 @@
-flow:
-  _target_: flow_modules.aiflows.AutoGPTFlowModule.AutoGPTFlow.instantiate_from_default_config
-  max_rounds: 30
-
-  ### Subflows specification
-  subflows_config:
-    Controller:
-      _target_: flow_modules.aiflows.ControllerExecutorFlowModule.ControllerAtomicFlow.instantiate_from_default_config
-      commands:
-        wiki_search:
-          description: "Performs a search on Wikipedia."
-          input_args: [ "search_term" ]
-        finish:
-          description: "Signal that the objective has been satisfied, and returns the answer to the user."
-          input_args: [ "answer" ]
-      backend:
-        api_infos: ???
-      human_message_prompt_template:
-        template: |2-
-          Here is the response to your last action:
-          {{observation}}
-          Here is the feedback from the user:
-          {{human_feedback}}
-        input_variables:
-          - "observation"
-          - "human_feedback"
-      input_interface_initialized:
-        - "observation"
-        - "human_feedback"
-
-    Executor:
-      _target_: aiflows.base_flows.BranchingFlow.instantiate_from_default_config
-      subflows_config:
-        wiki_search:
-          _target_: flow_modules.aiflows.ControllerExecutorFlowModule.WikiSearchAtomicFlow.instantiate_from_default_config
-
-    Memory:
-      backend:
-        api_infos: ???
+_target_: flow_modules.aiflows.AutoGPTFlowModule.AutoGPTFlow.instantiate_from_default_config
+max_rounds: 30
+
+### Subflows specification
+subflows_config:
+  # ControllerFlow Configuration
+  Controller:
+    _target_: flow_modules.aiflows.ControllerExecutorFlowModule.ControllerAtomicFlow.instantiate_from_default_config
+    commands:
+      wiki_search:
+        description: "Performs a search on Wikipedia."
+        input_args: ["search_term"]
+      finish:
+        description: "Signal that the objective has been satisfied, and returns the answer to the user."
+        input_args: ["answer"]
+    backend:
+      api_infos: ???
+    human_message_prompt_template:
+      template: |2-
+        Here is the response to your last action:
+        {{observation}}
+        Here is the feedback from the user:
+        {{human_feedback}}
+      input_variables:
+        - "observation"
+        - "human_feedback"
+    input_interface_initialized:
+      - "observation"
+      - "human_feedback"
+
+    previous_messages:
+      last_k: 1
+      first_k: 2
+
+  wiki_search:
+    _target_: flow_modules.aiflows.ControllerExecutorFlowModule.WikiSearchAtomicFlow.instantiate_from_default_config
+
+  # MemoryFlow Configuration
+  Memory:
+    backend:
+      model_name: none
+      api_infos: ???
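
The ??? markers are mandatory-missing values: the config cannot be used until api_infos is injected at runtime. In this commit run.py does that with quick_load_api_keys; a condensed sketch of the same injection:

    import os
    from aiflows.backends.api_info import ApiInfo
    from aiflows.utils.general_helpers import read_yaml_file, quick_load_api_keys

    # Fill every `api_infos: ???` placeholder in the config before serving the flow.
    cfg = read_yaml_file("demo.yaml")
    api_information = [ApiInfo(backend_used="openai", api_key=os.getenv("OPENAI_API_KEY"))]
    quick_load_api_keys(cfg, api_information, key="api_infos")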
 
run.py CHANGED
@@ -5,72 +5,95 @@ import hydra
 import aiflows
 from aiflows.flow_launchers import FlowLauncher
 from aiflows.backends.api_info import ApiInfo
-from aiflows.utils.general_helpers import read_yaml_file
+from aiflows.utils.general_helpers import read_yaml_file, quick_load_api_keys
 
 from aiflows import logging
 from aiflows.flow_cache import CACHING_PARAMETERS, clear_cache
+from aiflows.utils import serve_utils
+from aiflows.workers import run_dispatch_worker_thread
+from aiflows.messages import FlowMessage
+from aiflows.interfaces import KeyInterface
 
 CACHING_PARAMETERS.do_caching = False  # Set to True in order to disable caching
 # clear_cache() # Uncomment this line to clear the cache
 
 logging.set_verbosity_debug()
 
+from aiflows import flow_verse
+# ~~~ Load Flow dependencies from FlowVerse ~~~
 dependencies = [
     {"url": "aiflows/AutoGPTFlowModule", "revision": os.getcwd()},
-    {"url": "aiflows/LCToolFlowModule", "revision": "main"},
+    {"url": "aiflows/LCToolFlowModule", "revision": "80c0c76181d90846ebff1057b8951d9689f93b62"},
 ]
-from aiflows import flow_verse
 
 flow_verse.sync_dependencies(dependencies)
 if __name__ == "__main__":
     # ~~~ Set the API information ~~~
     # OpenAI backend
-    api_information = [ApiInfo(backend_used="openai",
-                               api_key=os.getenv("OPENAI_API_KEY"))]
+    api_information = [ApiInfo(backend_used="openai", api_key=os.getenv("OPENAI_API_KEY"))]
     # Azure backend
     # api_information = ApiInfo(backend_used = "azure",
     #                           api_base = os.getenv("AZURE_API_BASE"),
     #                           api_key = os.getenv("AZURE_OPENAI_KEY"),
     #                           api_version = os.getenv("AZURE_API_VERSION") )
+
+    FLOW_MODULES_PATH = "./"
+
+    jwt = os.getenv("COLINK_JWT")
+    addr = os.getenv("LOCAL_COLINK_ADDRESS")
+
+    cl = serve_utils.start_colink_component(
+        "Reverse Number Demo",
+        {"jwt": jwt, "addr": addr}
+    )
 
     root_dir = "."
     cfg_path = os.path.join(root_dir, "demo.yaml")
     cfg = read_yaml_file(cfg_path)
-    cfg["flow"]["subflows_config"]["Controller"]["backend"]["api_infos"] = api_information
-    cfg["flow"]["subflows_config"]["Memory"]["backend"]["api_infos"] = api_information
-    # ~~~ Instantiate the Flow ~~~
-    flow_with_interfaces = {
-        "flow": hydra.utils.instantiate(cfg['flow'], _recursive_=False, _convert_="partial"),
-        "input_interface": (
-            None
-            if cfg.get("input_interface", None) is None
-            else hydra.utils.instantiate(cfg['input_interface'], _recursive_=False)
-        ),
-        "output_interface": (
-            None
-            if cfg.get("output_interface", None) is None
-            else hydra.utils.instantiate(cfg['output_interface'], _recursive_=False)
-        ),
-    }
+
+    serve_utils.recursive_serve_flow(
+        cl=cl,
+        flow_type="demo_served",
+        default_config=cfg,
+        default_state=None,
+        default_dispatch_point="coflows_dispatch",
+    )
+
+    # In case you haven't started the dispatch worker thread, uncomment the 2 lines below
+    # run_dispatch_worker_thread(cl, dispatch_point="coflows_dispatch", flow_modules_base_path=FLOW_MODULES_PATH)
+    # run_dispatch_worker_thread(cl, dispatch_point="coflows_dispatch", flow_modules_base_path=FLOW_MODULES_PATH)
+
+    quick_load_api_keys(cfg, api_information, key="api_infos")
 
     # ~~~ Get the data ~~~
     # data = {"id": 0, "goal": "Answer the following question: What is the population of Canada?"} # Uses wikipedia
     # data = {"id": 0, "goal": "Answer the following question: Who was the NBA champion in 2023?"} # Uses duckduckgo
-    data = {"id": 0, "goal": "Answer the following question: What is the profession and date of birth of Michael Jordan?"}
+    data = {
+        "id": 0,
+        "goal": "Answer the following question: What is the profession and date of birth of Michael Jordan?",
+    }
     # At first, we retrieve information about Michael Jordan the basketball player
     # If we provide feedback, only in the first round, that we are not interested in the basketball player,
     # but the statistician, and skip the feedback in the next rounds, we get the correct answer
 
     # ~~~ Run inference ~~~
-    path_to_output_file = None
-    # path_to_output_file = "output.jsonl" # Uncomment this line to save the output to disk
-
-    _, outputs = FlowLauncher.launch(
-        flow_with_interfaces=flow_with_interfaces,
-        data=data,
-        path_to_output_file=path_to_output_file,
-    )
+    proxy_flow = serve_utils.recursive_mount(
+        cl=cl,
+        client_id="local",
+        flow_type="demo_served",
+        config_overrides=cfg,
+        initial_state=None,
+        dispatch_point_override=None,
+    )
 
     # ~~~ Print the output ~~~
-    flow_output_data = outputs[0]
-    print(flow_output_data)
+    input_message = FlowMessage(
+        data=data,
+        src_flow="Coflows team",
+        dst_flow=proxy_flow,
+        is_input_msg=True
+    )
+
+    future = proxy_flow.ask(input_message)
+
+    print(future.get_data())
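
Note the inverted execution model: instead of FlowLauncher driving the flow, the script serves it over CoLink, mounts a proxy, and exchanges messages, where proxy_flow.ask returns a future that blocks in get_data until a dispatch worker processes the message. If no worker is running yet, one has to be started before asking; a sketch mirroring the commented-out lines above, reusing the cl and FLOW_MODULES_PATH already defined in the script:

    # A dispatch worker must be running, or future.get_data() will wait forever.
    run_dispatch_worker_thread(
        cl,
        dispatch_point="coflows_dispatch",
        flow_modules_base_path=FLOW_MODULES_PATH,
    )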