Update multi_agent.py
multi_agent.py  +9 -7
@@ -43,7 +43,7 @@ def today_tool(text: str) -> str:
     Any date mathematics should occur outside this function."""
     return (str(date.today()) + "\n\nIf you have completed all tasks, respond with FINAL ANSWER.")

-def create_graph(model, topic):
+def create_graph(llm_manager, llm_agent, topic):
     tavily_tool = TavilySearchResults(max_results=10)

     members = ["Researcher"]
@@ -87,7 +87,7 @@ def create_graph(model, topic):
         ]
     ).partial(options=str(options), members=", ".join(members))

-    llm = ChatOpenAI(model=model)
+    llm = ChatOpenAI(model=llm_manager)

     supervisor_chain = (
         prompt
@@ -95,6 +95,8 @@ def create_graph(model, topic):
         | JsonOutputFunctionsParser()
     )

+    llm = ChatOpenAI(model=llm_agent)
+
     researcher_agent = create_agent(llm, [tavily_tool, today_tool], system_prompt=
         "1. Research content on topic: " + topic + ". "
         "2. Based on your research, write an in-depth article on the topic. "
@@ -119,8 +121,8 @@ def create_graph(model, topic):

     return workflow.compile()

-def run_multi_agent(llm, topic):
-    graph = create_graph(llm, topic)
+def run_multi_agent(llm_manager, llm_agent, topic):
+    graph = create_graph(llm_manager, llm_agent, topic)

     result = graph.invoke({
         "messages": [
@@ -130,8 +132,8 @@ def run_multi_agent(llm, topic):

     article = result['messages'][-1].content

-
-
-
+    print("===")
+    print(article)
+    print("===")

     return article
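For reference, a minimal call-site sketch for the updated signature. The model names and the topic below are placeholder values and are not part of this commit; run_multi_agent now takes separate model names for the supervisor chain and the Researcher agent, each handed to ChatOpenAI(model=...).

# Hypothetical usage; model names and topic are example values only.
from multi_agent import run_multi_agent

article = run_multi_agent(
    llm_manager="gpt-4o-mini",  # model used by the supervisor routing chain
    llm_agent="gpt-4o",         # model used by the Researcher agent
    topic="The history of the transistor",
)

Splitting llm_manager from llm_agent lets the supervisor and the Researcher run on different OpenAI models, instead of sharing the single model the old create_graph(model, topic) / run_multi_agent(llm, topic) signatures allowed.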