Update multi_agent.py
multi_agent.py  CHANGED  (+13 -33)
@@ -43,10 +43,10 @@ def today_tool(text: str) -> str:
     Any date mathematics should occur outside this function."""
     return (str(date.today()) + "\n\nIf you have completed all tasks, respond with FINAL ANSWER.")
 
-def create_graph(model, max_tokens, temperature, topic):
+def create_graph(model, topic):
     tavily_tool = TavilySearchResults(max_results=10)
 
-    members = ["Content Planner", "Content Writer"]
+    members = ["Researcher"]
     options = ["FINISH"] + members
 
     system_prompt = (
@@ -87,7 +87,7 @@ def create_graph(model, max_tokens, temperature, topic):
         ]
     ).partial(options=str(options), members=", ".join(members))
 
-    llm = ChatOpenAI(model=model, max_tokens=max_tokens, temperature=temperature)
+    llm = ChatOpenAI(model=model)
 
     supervisor_chain = (
         prompt
@@ -95,38 +95,18 @@ def create_graph(model, max_tokens, temperature, topic):
         | JsonOutputFunctionsParser()
     )
 
-    content_planner_agent = create_agent(llm, [...], system_prompt=
-        "..."  # planner prompt lines not recovered from the source view
-        "the Content Writer to write an article on this topic.")
-    content_planner_node = functools.partial(agent_node, agent=content_planner_agent, name="Content Planner")
+    researcher_agent = create_agent(llm, [tavily_tool, today_tool], system_prompt=
+        "1. Research content on topic: " + topic + ". "
+        "2. Based on your research, write an in-depth article on the topic. "
+        "3. The output must be in markdown format (omit the triple backticks). "
+        "4. At the beginning of the article, add current date and author: Multi-Agent AI System. "
+        "5. Also at the beginning of the article, add a references section with links to relevant content.")
+    researcher_node = functools.partial(agent_node, agent=researcher_agent, name="Researcher")
 
-    content_writer_agent = create_agent(llm, [today_tool], system_prompt=
-        "You are a Content Writer working on writing "
-        "a new opinion piece about the topic: " + topic + ". "
-        "You base your writing on the work of "
-        "the Content Planner, who provides an outline "
-        "and relevant context about the topic. "
-        "You follow the main objectives and "
-        "direction of the outline, "
-        "as provide by the Content Planner. "
-        "You also provide objective and impartial insights "
-        "and back them up with information "
-        "provide by the Content Planner. "
-        "You acknowledge in your opinion piece "
-        "when your statements are opinions "
-        "as opposed to objective statements.")
-    content_writer_node = functools.partial(agent_node, agent=content_writer_agent, name="Content Writer")
-
     workflow = StateGraph(AgentState)
 
     workflow.add_node("Manager", supervisor_chain)
-    workflow.add_node("Content Planner", content_planner_node)
-    workflow.add_node("Content Writer", content_writer_node)
+    workflow.add_node("Researcher", researcher_node)
 
     for member in members:
         workflow.add_edge(member, "Manager")
@@ -139,8 +119,8 @@ def create_graph(model, max_tokens, temperature, topic):
 
     return workflow.compile()
 
-def run_multi_agent(llm, max_tokens, temperature, topic):
-    graph = create_graph(llm, max_tokens, temperature, topic)
+def run_multi_agent(llm, topic):
+    graph = create_graph(llm, topic)
 
     result = graph.invoke({
         "messages": [