Spaces:
Runtime error
Runtime error
eaglelandsonce
commited on
Commit
•
dc13c57
1
Parent(s):
b6dfb48
Upload 30 files
Browse files- crewai/__init__.py +4 -0
- crewai/agent.py +237 -0
- crewai/agents/__init__.py +4 -0
- crewai/agents/cache/__init__.py +2 -0
- crewai/agents/cache/cache_handler.py +18 -0
- crewai/agents/cache/cache_hit.py +18 -0
- crewai/agents/exceptions.py +30 -0
- crewai/agents/executor.py +214 -0
- crewai/agents/output_parser.py +79 -0
- crewai/agents/tools_handler.py +44 -0
- crewai/crew.py +173 -0
- crewai/process.py +11 -0
- crewai/task.py +86 -0
- crewai/tasks/__init__.py +0 -0
- crewai/tasks/task_output.py +17 -0
- crewai/telemtry/__init__.py +1 -0
- crewai/tools/__init__.py +0 -0
- crewai/tools/agent_tools.py +64 -0
- crewai/tools/cache_tools.py +28 -0
- crewai/tools/gemini_tools.py +66 -0
- crewai/tools/mixtral_tools.py +82 -0
- crewai/tools/phi2_tools.py +52 -0
- crewai/tools/zephyr_tools.py +86 -0
- crewai/translations/el.json +26 -0
- crewai/translations/en.json +26 -0
- crewai/utilities/__init__.py +4 -0
- crewai/utilities/i18n.py +51 -0
- crewai/utilities/logger.py +11 -0
- crewai/utilities/prompts.py +32 -0
- crewai/utilities/rpm_controller.py +59 -0
crewai/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from crewai.agent import Agent
|
2 |
+
from crewai.crew import Crew
|
3 |
+
from crewai.process import Process
|
4 |
+
from crewai.task import Task
|
crewai/agent.py
ADDED
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import uuid
|
2 |
+
from typing import Any, List, Optional
|
3 |
+
|
4 |
+
from langchain.agents.format_scratchpad import format_log_to_str
|
5 |
+
from langchain.agents.agent import RunnableAgent
|
6 |
+
from langchain.memory import ConversationSummaryMemory
|
7 |
+
from langchain.tools.render import render_text_description
|
8 |
+
from langchain_core.runnables.config import RunnableConfig
|
9 |
+
from langchain_openai import ChatOpenAI
|
10 |
+
from langchain_core.language_models import BaseLanguageModel
|
11 |
+
from pydantic import (
|
12 |
+
UUID4,
|
13 |
+
BaseModel,
|
14 |
+
ConfigDict,
|
15 |
+
Field,
|
16 |
+
InstanceOf,
|
17 |
+
PrivateAttr,
|
18 |
+
field_validator,
|
19 |
+
model_validator,
|
20 |
+
)
|
21 |
+
from pydantic_core import PydanticCustomError
|
22 |
+
|
23 |
+
from crewai.agents import (
|
24 |
+
CacheHandler,
|
25 |
+
CrewAgentExecutor,
|
26 |
+
CrewAgentOutputParser,
|
27 |
+
ToolsHandler,
|
28 |
+
)
|
29 |
+
from crewai.utilities import I18N, Logger, Prompts, RPMController
|
30 |
+
|
31 |
+
|
32 |
+
class Agent(BaseModel):
|
33 |
+
"""Represents an agent in a system.
|
34 |
+
Each agent has a role, a goal, a backstory, and an optional language model (llm).
|
35 |
+
The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
|
36 |
+
Attributes:
|
37 |
+
agent_executor: An instance of the CrewAgentExecutor class.
|
38 |
+
role: The role of the agent.
|
39 |
+
goal: The objective of the agent.
|
40 |
+
backstory: The backstory of the agent.
|
41 |
+
llm: The language model that will run the agent.
|
42 |
+
max_iter: Maximum number of iterations for an agent to execute a task.
|
43 |
+
memory: Whether the agent should have memory or not.
|
44 |
+
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
|
45 |
+
verbose: Whether the agent execution should be in verbose mode.
|
46 |
+
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
|
47 |
+
tools: Tools at agents disposal
|
48 |
+
"""
|
49 |
+
|
50 |
+
__hash__ = object.__hash__ # type: ignore
|
51 |
+
_logger: Logger = PrivateAttr()
|
52 |
+
_rpm_controller: RPMController = PrivateAttr(default=None)
|
53 |
+
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
54 |
+
|
55 |
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
56 |
+
id: UUID4 = Field(
|
57 |
+
default_factory=uuid.uuid4,
|
58 |
+
frozen=True,
|
59 |
+
description="Unique identifier for the object, not set by user.",
|
60 |
+
)
|
61 |
+
role: str = Field(description="Role of the agent")
|
62 |
+
goal: str = Field(description="Objective of the agent")
|
63 |
+
backstory: str = Field(description="Backstory of the agent")
|
64 |
+
max_rpm: Optional[int] = Field(
|
65 |
+
default=None,
|
66 |
+
description="Maximum number of requests per minute for the agent execution to be respected.",
|
67 |
+
)
|
68 |
+
memory: bool = Field(
|
69 |
+
default=True, description="Whether the agent should have memory or not"
|
70 |
+
)
|
71 |
+
verbose: bool = Field(
|
72 |
+
default=False, description="Verbose mode for the Agent Execution"
|
73 |
+
)
|
74 |
+
allow_delegation: bool = Field(
|
75 |
+
default=True, description="Allow delegation of tasks to agents"
|
76 |
+
)
|
77 |
+
tools: List[Any] = Field(
|
78 |
+
default_factory=list, description="Tools at agents disposal"
|
79 |
+
)
|
80 |
+
max_iter: Optional[int] = Field(
|
81 |
+
default=15, description="Maximum iterations for an agent to execute a task"
|
82 |
+
)
|
83 |
+
agent_executor: InstanceOf[CrewAgentExecutor] = Field(
|
84 |
+
default=None, description="An instance of the CrewAgentExecutor class."
|
85 |
+
)
|
86 |
+
tools_handler: InstanceOf[ToolsHandler] = Field(
|
87 |
+
default=None, description="An instance of the ToolsHandler class."
|
88 |
+
)
|
89 |
+
cache_handler: InstanceOf[CacheHandler] = Field(
|
90 |
+
default=CacheHandler(), description="An instance of the CacheHandler class."
|
91 |
+
)
|
92 |
+
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
93 |
+
llm: Any = Field(
|
94 |
+
default_factory=lambda: ChatOpenAI(
|
95 |
+
model="gpt-4",
|
96 |
+
),
|
97 |
+
description="Language model that will run the agent.",
|
98 |
+
)
|
99 |
+
|
100 |
+
@field_validator("id", mode="before")
|
101 |
+
@classmethod
|
102 |
+
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
103 |
+
if v:
|
104 |
+
raise PydanticCustomError(
|
105 |
+
"may_not_set_field", "This field is not to be set by the user.", {}
|
106 |
+
)
|
107 |
+
|
108 |
+
@model_validator(mode="after")
|
109 |
+
def set_private_attrs(self):
|
110 |
+
"""Set private attributes."""
|
111 |
+
self._logger = Logger(self.verbose)
|
112 |
+
if self.max_rpm and not self._rpm_controller:
|
113 |
+
self._rpm_controller = RPMController(
|
114 |
+
max_rpm=self.max_rpm, logger=self._logger
|
115 |
+
)
|
116 |
+
return self
|
117 |
+
|
118 |
+
@model_validator(mode="after")
|
119 |
+
def check_agent_executor(self) -> "Agent":
|
120 |
+
"""Check if the agent executor is set."""
|
121 |
+
if not self.agent_executor:
|
122 |
+
self.set_cache_handler(self.cache_handler)
|
123 |
+
return self
|
124 |
+
|
125 |
+
def execute_task(
|
126 |
+
self,
|
127 |
+
task: str,
|
128 |
+
context: Optional[str] = None,
|
129 |
+
tools: Optional[List[Any]] = None,
|
130 |
+
) -> str:
|
131 |
+
"""Execute a task with the agent.
|
132 |
+
Args:
|
133 |
+
task: Task to execute.
|
134 |
+
context: Context to execute the task in.
|
135 |
+
tools: Tools to use for the task.
|
136 |
+
Returns:
|
137 |
+
Output of the agent
|
138 |
+
"""
|
139 |
+
|
140 |
+
if context:
|
141 |
+
task = self.i18n.slice("task_with_context").format(
|
142 |
+
task=task, context=context
|
143 |
+
)
|
144 |
+
|
145 |
+
tools = tools or self.tools
|
146 |
+
self.agent_executor.tools = tools
|
147 |
+
|
148 |
+
result = self.agent_executor.invoke(
|
149 |
+
{
|
150 |
+
"input": task,
|
151 |
+
"tool_names": self.__tools_names(tools),
|
152 |
+
"tools": render_text_description(tools),
|
153 |
+
},
|
154 |
+
RunnableConfig(callbacks=[self.tools_handler]),
|
155 |
+
)["output"]
|
156 |
+
|
157 |
+
if self.max_rpm:
|
158 |
+
self._rpm_controller.stop_rpm_counter()
|
159 |
+
|
160 |
+
return result
|
161 |
+
|
162 |
+
def set_cache_handler(self, cache_handler: CacheHandler) -> None:
|
163 |
+
"""Set the cache handler for the agent.
|
164 |
+
Args:
|
165 |
+
cache_handler: An instance of the CacheHandler class.
|
166 |
+
"""
|
167 |
+
self.cache_handler = cache_handler
|
168 |
+
self.tools_handler = ToolsHandler(cache=self.cache_handler)
|
169 |
+
self.__create_agent_executor()
|
170 |
+
|
171 |
+
def set_rpm_controller(self, rpm_controller: RPMController) -> None:
|
172 |
+
"""Set the rpm controller for the agent.
|
173 |
+
Args:
|
174 |
+
rpm_controller: An instance of the RPMController class.
|
175 |
+
"""
|
176 |
+
if not self._rpm_controller:
|
177 |
+
self._rpm_controller = rpm_controller
|
178 |
+
self.__create_agent_executor()
|
179 |
+
|
180 |
+
def __create_agent_executor(self) -> None:
|
181 |
+
"""Create an agent executor for the agent.
|
182 |
+
Returns:
|
183 |
+
An instance of the CrewAgentExecutor class.
|
184 |
+
"""
|
185 |
+
agent_args = {
|
186 |
+
"input": lambda x: x["input"],
|
187 |
+
"tools": lambda x: x["tools"],
|
188 |
+
"tool_names": lambda x: x["tool_names"],
|
189 |
+
"agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
|
190 |
+
}
|
191 |
+
executor_args = {
|
192 |
+
"i18n": self.i18n,
|
193 |
+
"tools": self.tools,
|
194 |
+
"verbose": self.verbose,
|
195 |
+
"handle_parsing_errors": True,
|
196 |
+
"max_iterations": self.max_iter,
|
197 |
+
}
|
198 |
+
|
199 |
+
if self._rpm_controller:
|
200 |
+
executor_args["request_within_rpm_limit"] = (
|
201 |
+
self._rpm_controller.check_or_wait
|
202 |
+
)
|
203 |
+
|
204 |
+
if self.memory:
|
205 |
+
summary_memory = ConversationSummaryMemory(
|
206 |
+
llm=self.llm, input_key="input", memory_key="chat_history"
|
207 |
+
)
|
208 |
+
executor_args["memory"] = summary_memory
|
209 |
+
agent_args["chat_history"] = lambda x: x["chat_history"]
|
210 |
+
prompt = Prompts(i18n=self.i18n).task_execution_with_memory()
|
211 |
+
else:
|
212 |
+
prompt = Prompts(i18n=self.i18n).task_execution()
|
213 |
+
|
214 |
+
execution_prompt = prompt.partial(
|
215 |
+
goal=self.goal,
|
216 |
+
role=self.role,
|
217 |
+
backstory=self.backstory,
|
218 |
+
)
|
219 |
+
|
220 |
+
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
|
221 |
+
inner_agent = (
|
222 |
+
agent_args
|
223 |
+
| execution_prompt
|
224 |
+
| bind
|
225 |
+
| CrewAgentOutputParser(
|
226 |
+
tools_handler=self.tools_handler,
|
227 |
+
cache=self.cache_handler,
|
228 |
+
i18n=self.i18n,
|
229 |
+
)
|
230 |
+
)
|
231 |
+
self.agent_executor = CrewAgentExecutor(
|
232 |
+
agent=RunnableAgent(runnable=inner_agent), **executor_args
|
233 |
+
)
|
234 |
+
|
235 |
+
@staticmethod
|
236 |
+
def __tools_names(tools) -> str:
|
237 |
+
return ", ".join([t.name for t in tools])
|
crewai/agents/__init__.py
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .cache.cache_handler import CacheHandler
|
2 |
+
from .executor import CrewAgentExecutor
|
3 |
+
from .output_parser import CrewAgentOutputParser
|
4 |
+
from .tools_handler import ToolsHandler
|
crewai/agents/cache/__init__.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
from .cache_handler import CacheHandler
|
2 |
+
from .cache_hit import CacheHit
|
crewai/agents/cache/cache_handler.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Optional
|
2 |
+
|
3 |
+
|
4 |
+
class CacheHandler:
|
5 |
+
"""Callback handler for tool usage."""
|
6 |
+
|
7 |
+
_cache: dict = {}
|
8 |
+
|
9 |
+
def __init__(self):
|
10 |
+
self._cache = {}
|
11 |
+
|
12 |
+
def add(self, tool, input, output):
|
13 |
+
input = input.strip()
|
14 |
+
self._cache[f"{tool}-{input}"] = output
|
15 |
+
|
16 |
+
def read(self, tool, input) -> Optional[str]:
|
17 |
+
input = input.strip()
|
18 |
+
return self._cache.get(f"{tool}-{input}")
|
crewai/agents/cache/cache_hit.py
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Any
|
2 |
+
|
3 |
+
from pydantic import BaseModel, Field
|
4 |
+
|
5 |
+
from .cache_handler import CacheHandler
|
6 |
+
|
7 |
+
|
8 |
+
class CacheHit(BaseModel):
|
9 |
+
"""Cache Hit Object."""
|
10 |
+
|
11 |
+
class Config:
|
12 |
+
arbitrary_types_allowed = True
|
13 |
+
|
14 |
+
# Making it Any instead of AgentAction to avoind
|
15 |
+
# pydantic v1 vs v2 incompatibility, langchain should
|
16 |
+
# soon be updated to pydantic v2
|
17 |
+
action: Any = Field(description="Action taken")
|
18 |
+
cache: CacheHandler = Field(description="Cache Handler for the tool")
|
crewai/agents/exceptions.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain_core.exceptions import OutputParserException
|
2 |
+
|
3 |
+
from crewai.utilities import I18N
|
4 |
+
|
5 |
+
|
6 |
+
class TaskRepeatedUsageException(OutputParserException):
|
7 |
+
"""Exception raised when a task is used twice in a roll."""
|
8 |
+
|
9 |
+
i18n: I18N = I18N()
|
10 |
+
error: str = "TaskRepeatedUsageException"
|
11 |
+
message: str
|
12 |
+
|
13 |
+
def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
|
14 |
+
self.i18n = i18n
|
15 |
+
self.text = text
|
16 |
+
self.tool = tool
|
17 |
+
self.tool_input = tool_input
|
18 |
+
self.message = self.i18n.errors("task_repeated_usage").format(
|
19 |
+
tool=tool, tool_input=tool_input
|
20 |
+
)
|
21 |
+
|
22 |
+
super().__init__(
|
23 |
+
error=self.error,
|
24 |
+
observation=self.message,
|
25 |
+
send_to_llm=True,
|
26 |
+
llm_output=self.text,
|
27 |
+
)
|
28 |
+
|
29 |
+
def __str__(self):
|
30 |
+
return self.message
|
crewai/agents/executor.py
ADDED
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import time
|
2 |
+
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
|
3 |
+
|
4 |
+
from langchain.agents import AgentExecutor
|
5 |
+
from langchain.agents.agent import ExceptionTool
|
6 |
+
from langchain.agents.tools import InvalidTool
|
7 |
+
from langchain.callbacks.manager import CallbackManagerForChainRun
|
8 |
+
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
|
9 |
+
from langchain_core.exceptions import OutputParserException
|
10 |
+
from langchain_core.pydantic_v1 import root_validator
|
11 |
+
from langchain_core.tools import BaseTool
|
12 |
+
from langchain_core.utils.input import get_color_mapping
|
13 |
+
|
14 |
+
from crewai.agents.cache.cache_hit import CacheHit
|
15 |
+
from crewai.tools.cache_tools import CacheTools
|
16 |
+
from crewai.utilities import I18N
|
17 |
+
|
18 |
+
|
19 |
+
class CrewAgentExecutor(AgentExecutor):
|
20 |
+
i18n: I18N = I18N()
|
21 |
+
iterations: int = 0
|
22 |
+
request_within_rpm_limit: Any = None
|
23 |
+
max_iterations: Optional[int] = 15
|
24 |
+
force_answer_max_iterations: Optional[int] = None
|
25 |
+
|
26 |
+
@root_validator()
|
27 |
+
def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
|
28 |
+
values["force_answer_max_iterations"] = values["max_iterations"] - 2
|
29 |
+
return values
|
30 |
+
|
31 |
+
def _should_force_answer(self) -> bool:
|
32 |
+
return True if self.iterations == self.force_answer_max_iterations else False
|
33 |
+
|
34 |
+
def _force_answer(self, output: AgentAction):
|
35 |
+
return AgentStep(
|
36 |
+
action=output, observation=self.i18n.errors("force_final_answer")
|
37 |
+
)
|
38 |
+
|
39 |
+
def _call(
|
40 |
+
self,
|
41 |
+
inputs: Dict[str, str],
|
42 |
+
run_manager: Optional[CallbackManagerForChainRun] = None,
|
43 |
+
) -> Dict[str, Any]:
|
44 |
+
"""Run text through and get agent response."""
|
45 |
+
# Construct a mapping of tool name to tool for easy lookup
|
46 |
+
name_to_tool_map = {tool.name: tool for tool in self.tools}
|
47 |
+
# We construct a mapping from each tool to a color, used for logging.
|
48 |
+
color_mapping = get_color_mapping(
|
49 |
+
[tool.name for tool in self.tools], excluded_colors=["green", "red"]
|
50 |
+
)
|
51 |
+
intermediate_steps: List[Tuple[AgentAction, str]] = []
|
52 |
+
# Let's start tracking the number of iterations and time elapsed
|
53 |
+
self.iterations = 0
|
54 |
+
time_elapsed = 0.0
|
55 |
+
start_time = time.time()
|
56 |
+
# We now enter the agent loop (until it returns something).
|
57 |
+
while self._should_continue(self.iterations, time_elapsed):
|
58 |
+
if not self.request_within_rpm_limit or self.request_within_rpm_limit():
|
59 |
+
next_step_output = self._take_next_step(
|
60 |
+
name_to_tool_map,
|
61 |
+
color_mapping,
|
62 |
+
inputs,
|
63 |
+
intermediate_steps,
|
64 |
+
run_manager=run_manager,
|
65 |
+
)
|
66 |
+
if isinstance(next_step_output, AgentFinish):
|
67 |
+
return self._return(
|
68 |
+
next_step_output, intermediate_steps, run_manager=run_manager
|
69 |
+
)
|
70 |
+
|
71 |
+
intermediate_steps.extend(next_step_output)
|
72 |
+
if len(next_step_output) == 1:
|
73 |
+
next_step_action = next_step_output[0]
|
74 |
+
# See if tool should return directly
|
75 |
+
tool_return = self._get_tool_return(next_step_action)
|
76 |
+
if tool_return is not None:
|
77 |
+
return self._return(
|
78 |
+
tool_return, intermediate_steps, run_manager=run_manager
|
79 |
+
)
|
80 |
+
self.iterations += 1
|
81 |
+
time_elapsed = time.time() - start_time
|
82 |
+
output = self.agent.return_stopped_response(
|
83 |
+
self.early_stopping_method, intermediate_steps, **inputs
|
84 |
+
)
|
85 |
+
return self._return(output, intermediate_steps, run_manager=run_manager)
|
86 |
+
|
87 |
+
def _iter_next_step(
|
88 |
+
self,
|
89 |
+
name_to_tool_map: Dict[str, BaseTool],
|
90 |
+
color_mapping: Dict[str, str],
|
91 |
+
inputs: Dict[str, str],
|
92 |
+
intermediate_steps: List[Tuple[AgentAction, str]],
|
93 |
+
run_manager: Optional[CallbackManagerForChainRun] = None,
|
94 |
+
) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:
|
95 |
+
"""Take a single step in the thought-action-observation loop.
|
96 |
+
|
97 |
+
Override this to take control of how the agent makes and acts on choices.
|
98 |
+
"""
|
99 |
+
try:
|
100 |
+
intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
|
101 |
+
|
102 |
+
# Call the LLM to see what to do.
|
103 |
+
output = self.agent.plan(
|
104 |
+
intermediate_steps,
|
105 |
+
callbacks=run_manager.get_child() if run_manager else None,
|
106 |
+
**inputs,
|
107 |
+
)
|
108 |
+
if self._should_force_answer():
|
109 |
+
if isinstance(output, AgentAction) or isinstance(output, AgentFinish):
|
110 |
+
output = output
|
111 |
+
elif isinstance(output, CacheHit):
|
112 |
+
output = output.action
|
113 |
+
else:
|
114 |
+
raise ValueError(
|
115 |
+
f"Unexpected output type from agent: {type(output)}"
|
116 |
+
)
|
117 |
+
yield self._force_answer(output)
|
118 |
+
return
|
119 |
+
|
120 |
+
except OutputParserException as e:
|
121 |
+
if isinstance(self.handle_parsing_errors, bool):
|
122 |
+
raise_error = not self.handle_parsing_errors
|
123 |
+
else:
|
124 |
+
raise_error = False
|
125 |
+
if raise_error:
|
126 |
+
raise ValueError(
|
127 |
+
"An output parsing error occurred. "
|
128 |
+
"In order to pass this error back to the agent and have it try "
|
129 |
+
"again, pass `handle_parsing_errors=True` to the AgentExecutor. "
|
130 |
+
f"This is the error: {str(e)}"
|
131 |
+
)
|
132 |
+
text = str(e)
|
133 |
+
if isinstance(self.handle_parsing_errors, bool):
|
134 |
+
if e.send_to_llm:
|
135 |
+
observation = str(e.observation)
|
136 |
+
text = str(e.llm_output)
|
137 |
+
else:
|
138 |
+
observation = "Invalid or incomplete response"
|
139 |
+
elif isinstance(self.handle_parsing_errors, str):
|
140 |
+
observation = self.handle_parsing_errors
|
141 |
+
elif callable(self.handle_parsing_errors):
|
142 |
+
observation = self.handle_parsing_errors(e)
|
143 |
+
else:
|
144 |
+
raise ValueError("Got unexpected type of `handle_parsing_errors`")
|
145 |
+
output = AgentAction("_Exception", observation, text)
|
146 |
+
if run_manager:
|
147 |
+
run_manager.on_agent_action(output, color="green")
|
148 |
+
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
149 |
+
observation = ExceptionTool().run(
|
150 |
+
output.tool_input,
|
151 |
+
verbose=self.verbose,
|
152 |
+
color=None,
|
153 |
+
callbacks=run_manager.get_child() if run_manager else None,
|
154 |
+
**tool_run_kwargs,
|
155 |
+
)
|
156 |
+
|
157 |
+
if self._should_force_answer():
|
158 |
+
yield self._force_answer(output)
|
159 |
+
return
|
160 |
+
|
161 |
+
yield AgentStep(action=output, observation=observation)
|
162 |
+
return
|
163 |
+
|
164 |
+
# If the tool chosen is the finishing tool, then we end and return.
|
165 |
+
if isinstance(output, AgentFinish):
|
166 |
+
yield output
|
167 |
+
return
|
168 |
+
|
169 |
+
# Override tool usage to use CacheTools
|
170 |
+
if isinstance(output, CacheHit):
|
171 |
+
cache = output.cache
|
172 |
+
action = output.action
|
173 |
+
tool = CacheTools(cache_handler=cache).tool()
|
174 |
+
output = action.copy()
|
175 |
+
output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
|
176 |
+
output.tool = tool.name
|
177 |
+
name_to_tool_map[tool.name] = tool
|
178 |
+
color_mapping[tool.name] = color_mapping[action.tool]
|
179 |
+
|
180 |
+
actions: List[AgentAction]
|
181 |
+
actions = [output] if isinstance(output, AgentAction) else output
|
182 |
+
yield from actions
|
183 |
+
for agent_action in actions:
|
184 |
+
if run_manager:
|
185 |
+
run_manager.on_agent_action(agent_action, color="green")
|
186 |
+
# Otherwise we lookup the tool
|
187 |
+
if agent_action.tool in name_to_tool_map:
|
188 |
+
tool = name_to_tool_map[agent_action.tool]
|
189 |
+
return_direct = tool.return_direct
|
190 |
+
color = color_mapping[agent_action.tool]
|
191 |
+
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
192 |
+
if return_direct:
|
193 |
+
tool_run_kwargs["llm_prefix"] = ""
|
194 |
+
# We then call the tool on the tool input to get an observation
|
195 |
+
observation = tool.run(
|
196 |
+
agent_action.tool_input,
|
197 |
+
verbose=self.verbose,
|
198 |
+
color=color,
|
199 |
+
callbacks=run_manager.get_child() if run_manager else None,
|
200 |
+
**tool_run_kwargs,
|
201 |
+
)
|
202 |
+
else:
|
203 |
+
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
|
204 |
+
observation = InvalidTool().run(
|
205 |
+
{
|
206 |
+
"requested_tool_name": agent_action.tool,
|
207 |
+
"available_tool_names": list(name_to_tool_map.keys()),
|
208 |
+
},
|
209 |
+
verbose=self.verbose,
|
210 |
+
color=None,
|
211 |
+
callbacks=run_manager.get_child() if run_manager else None,
|
212 |
+
**tool_run_kwargs,
|
213 |
+
)
|
214 |
+
yield AgentStep(action=agent_action, observation=observation)
|
crewai/agents/output_parser.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
from typing import Union
|
3 |
+
|
4 |
+
from langchain.agents.output_parsers import ReActSingleInputOutputParser
|
5 |
+
from langchain_core.agents import AgentAction, AgentFinish
|
6 |
+
|
7 |
+
from crewai.agents.cache import CacheHandler, CacheHit
|
8 |
+
from crewai.agents.exceptions import TaskRepeatedUsageException
|
9 |
+
from crewai.agents.tools_handler import ToolsHandler
|
10 |
+
from crewai.utilities import I18N
|
11 |
+
|
12 |
+
FINAL_ANSWER_ACTION = "Final Answer:"
|
13 |
+
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
|
14 |
+
"Parsing LLM output produced both a final answer and a parse-able action:"
|
15 |
+
)
|
16 |
+
|
17 |
+
|
18 |
+
class CrewAgentOutputParser(ReActSingleInputOutputParser):
|
19 |
+
"""Parses ReAct-style LLM calls that have a single tool input.
|
20 |
+
|
21 |
+
Expects output to be in one of two formats.
|
22 |
+
|
23 |
+
If the output signals that an action should be taken,
|
24 |
+
should be in the below format. This will result in an AgentAction
|
25 |
+
being returned.
|
26 |
+
|
27 |
+
```
|
28 |
+
Thought: agent thought here
|
29 |
+
Action: search
|
30 |
+
Action Input: what is the temperature in SF?
|
31 |
+
```
|
32 |
+
|
33 |
+
If the output signals that a final answer should be given,
|
34 |
+
should be in the below format. This will result in an AgentFinish
|
35 |
+
being returned.
|
36 |
+
|
37 |
+
```
|
38 |
+
Thought: agent thought here
|
39 |
+
Final Answer: The temperature is 100 degrees
|
40 |
+
```
|
41 |
+
|
42 |
+
It also prevents tools from being reused in a roll.
|
43 |
+
"""
|
44 |
+
|
45 |
+
class Config:
|
46 |
+
arbitrary_types_allowed = True
|
47 |
+
|
48 |
+
tools_handler: ToolsHandler
|
49 |
+
cache: CacheHandler
|
50 |
+
i18n: I18N
|
51 |
+
|
52 |
+
def parse(self, text: str) -> Union[AgentAction, AgentFinish, CacheHit]:
|
53 |
+
regex = (
|
54 |
+
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
|
55 |
+
)
|
56 |
+
if action_match := re.search(regex, text, re.DOTALL):
|
57 |
+
action = action_match.group(1).strip()
|
58 |
+
action_input = action_match.group(2)
|
59 |
+
tool_input = action_input.strip(" ")
|
60 |
+
tool_input = tool_input.strip('"')
|
61 |
+
|
62 |
+
if last_tool_usage := self.tools_handler.last_used_tool:
|
63 |
+
usage = {
|
64 |
+
"tool": action,
|
65 |
+
"input": tool_input,
|
66 |
+
}
|
67 |
+
if usage == last_tool_usage:
|
68 |
+
raise TaskRepeatedUsageException(
|
69 |
+
text=text,
|
70 |
+
tool=action,
|
71 |
+
tool_input=tool_input,
|
72 |
+
i18n=self.i18n,
|
73 |
+
)
|
74 |
+
|
75 |
+
if self.cache.read(action, tool_input):
|
76 |
+
action = AgentAction(action, tool_input, text)
|
77 |
+
return CacheHit(action=action, cache=self.cache)
|
78 |
+
|
79 |
+
return super().parse(text)
|
crewai/agents/tools_handler.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Any, Dict
|
2 |
+
|
3 |
+
from langchain.callbacks.base import BaseCallbackHandler
|
4 |
+
|
5 |
+
from ..tools.cache_tools import CacheTools
|
6 |
+
from .cache.cache_handler import CacheHandler
|
7 |
+
|
8 |
+
|
9 |
+
class ToolsHandler(BaseCallbackHandler):
|
10 |
+
"""Callback handler for tool usage."""
|
11 |
+
|
12 |
+
last_used_tool: Dict[str, Any] = {}
|
13 |
+
cache: CacheHandler
|
14 |
+
|
15 |
+
def __init__(self, cache: CacheHandler, **kwargs: Any):
|
16 |
+
"""Initialize the callback handler."""
|
17 |
+
self.cache = cache
|
18 |
+
super().__init__(**kwargs)
|
19 |
+
|
20 |
+
def on_tool_start(
|
21 |
+
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
|
22 |
+
) -> Any:
|
23 |
+
"""Run when tool starts running."""
|
24 |
+
name = serialized.get("name")
|
25 |
+
if name not in ["invalid_tool", "_Exception"]:
|
26 |
+
tools_usage = {
|
27 |
+
"tool": name,
|
28 |
+
"input": input_str,
|
29 |
+
}
|
30 |
+
self.last_used_tool = tools_usage
|
31 |
+
|
32 |
+
def on_tool_end(self, output: str, **kwargs: Any) -> Any:
|
33 |
+
"""Run when tool ends running."""
|
34 |
+
if (
|
35 |
+
"is not a valid tool" not in output
|
36 |
+
and "Invalid or incomplete response" not in output
|
37 |
+
and "Invalid Format" not in output
|
38 |
+
):
|
39 |
+
if self.last_used_tool["tool"] != CacheTools().name:
|
40 |
+
self.cache.add(
|
41 |
+
tool=self.last_used_tool["tool"],
|
42 |
+
input=self.last_used_tool["input"],
|
43 |
+
output=output,
|
44 |
+
)
|
crewai/crew.py
ADDED
@@ -0,0 +1,173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import uuid
|
3 |
+
from typing import Any, Dict, List, Optional, Union
|
4 |
+
|
5 |
+
from pydantic import (
|
6 |
+
UUID4,
|
7 |
+
BaseModel,
|
8 |
+
ConfigDict,
|
9 |
+
Field,
|
10 |
+
InstanceOf,
|
11 |
+
Json,
|
12 |
+
PrivateAttr,
|
13 |
+
field_validator,
|
14 |
+
model_validator,
|
15 |
+
)
|
16 |
+
from pydantic_core import PydanticCustomError
|
17 |
+
|
18 |
+
from crewai.agent import Agent
|
19 |
+
from crewai.agents.cache import CacheHandler
|
20 |
+
from crewai.process import Process
|
21 |
+
from crewai.task import Task
|
22 |
+
from crewai.tools.agent_tools import AgentTools
|
23 |
+
from crewai.utilities import I18N, Logger, RPMController
|
24 |
+
|
25 |
+
|
26 |
+
class Crew(BaseModel):
|
27 |
+
"""
|
28 |
+
Represents a group of agents, defining how they should collaborate and the tasks they should perform.
|
29 |
+
Attributes:
|
30 |
+
tasks: List of tasks assigned to the crew.
|
31 |
+
agents: List of agents part of this crew.
|
32 |
+
process: The process flow that the crew will follow (e.g., sequential).
|
33 |
+
verbose: Indicates the verbosity level for logging during execution.
|
34 |
+
config: Configuration settings for the crew.
|
35 |
+
_cache_handler: Handles caching for the crew's operations.
|
36 |
+
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
|
37 |
+
id: A unique identifier for the crew instance.
|
38 |
+
"""
|
39 |
+
|
40 |
+
__hash__ = object.__hash__ # type: ignore
|
41 |
+
_rpm_controller: RPMController = PrivateAttr()
|
42 |
+
_logger: Logger = PrivateAttr()
|
43 |
+
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default=CacheHandler())
|
44 |
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
45 |
+
tasks: List[Task] = Field(default_factory=list)
|
46 |
+
agents: List[Agent] = Field(default_factory=list)
|
47 |
+
process: Process = Field(default=Process.sequential)
|
48 |
+
verbose: Union[int, bool] = Field(default=0)
|
49 |
+
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
|
50 |
+
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
51 |
+
max_rpm: Optional[int] = Field(
|
52 |
+
default=None,
|
53 |
+
description="Maximum number of requests per minute for the crew execution to be respected.",
|
54 |
+
)
|
55 |
+
language: str = Field(
|
56 |
+
default="en",
|
57 |
+
description="Language used for the crew, defaults to English.",
|
58 |
+
)
|
59 |
+
|
60 |
+
@field_validator("id", mode="before")
|
61 |
+
@classmethod
|
62 |
+
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
63 |
+
"""Prevent manual setting of the 'id' field by users."""
|
64 |
+
if v:
|
65 |
+
raise PydanticCustomError(
|
66 |
+
"may_not_set_field", "The 'id' field cannot be set by the user.", {}
|
67 |
+
)
|
68 |
+
|
69 |
+
@field_validator("config", mode="before")
|
70 |
+
@classmethod
|
71 |
+
def check_config_type(
|
72 |
+
cls, v: Union[Json, Dict[str, Any]]
|
73 |
+
) -> Union[Json, Dict[str, Any]]:
|
74 |
+
"""Validates that the config is a valid type.
|
75 |
+
Args:
|
76 |
+
v: The config to be validated.
|
77 |
+
Returns:
|
78 |
+
The config if it is valid.
|
79 |
+
"""
|
80 |
+
|
81 |
+
# TODO: Improve typing
|
82 |
+
return json.loads(v) if isinstance(v, Json) else v # type: ignore
|
83 |
+
|
84 |
+
@model_validator(mode="after")
|
85 |
+
def set_private_attrs(self) -> "Crew":
|
86 |
+
"""Set private attributes."""
|
87 |
+
self._cache_handler = CacheHandler()
|
88 |
+
self._logger = Logger(self.verbose)
|
89 |
+
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
|
90 |
+
return self
|
91 |
+
|
92 |
+
@model_validator(mode="after")
|
93 |
+
def check_config(self):
|
94 |
+
"""Validates that the crew is properly configured with agents and tasks."""
|
95 |
+
if not self.config and not self.tasks and not self.agents:
|
96 |
+
raise PydanticCustomError(
|
97 |
+
"missing_keys",
|
98 |
+
"Either 'agents' and 'tasks' need to be set or 'config'.",
|
99 |
+
{},
|
100 |
+
)
|
101 |
+
|
102 |
+
if self.config:
|
103 |
+
self._setup_from_config()
|
104 |
+
|
105 |
+
if self.agents:
|
106 |
+
for agent in self.agents:
|
107 |
+
agent.set_cache_handler(self._cache_handler)
|
108 |
+
agent.set_rpm_controller(self._rpm_controller)
|
109 |
+
return self
|
110 |
+
|
111 |
+
def _setup_from_config(self):
|
112 |
+
assert self.config is not None, "Config should not be None."
|
113 |
+
|
114 |
+
"""Initializes agents and tasks from the provided config."""
|
115 |
+
if not self.config.get("agents") or not self.config.get("tasks"):
|
116 |
+
raise PydanticCustomError(
|
117 |
+
"missing_keys_in_config", "Config should have 'agents' and 'tasks'.", {}
|
118 |
+
)
|
119 |
+
|
120 |
+
self.agents = [Agent(**agent) for agent in self.config["agents"]]
|
121 |
+
self.tasks = [self._create_task(task) for task in self.config["tasks"]]
|
122 |
+
|
123 |
+
def _create_task(self, task_config: Dict[str, Any]) -> Task:
|
124 |
+
"""Creates a task instance from its configuration.
|
125 |
+
Args:
|
126 |
+
task_config: The configuration of the task.
|
127 |
+
Returns:
|
128 |
+
A task instance.
|
129 |
+
"""
|
130 |
+
task_agent = next(
|
131 |
+
agt for agt in self.agents if agt.role == task_config["agent"]
|
132 |
+
)
|
133 |
+
del task_config["agent"]
|
134 |
+
return Task(**task_config, agent=task_agent)
|
135 |
+
|
136 |
+
def kickoff(self) -> str:
|
137 |
+
"""Starts the crew to work on its assigned tasks."""
|
138 |
+
for agent in self.agents:
|
139 |
+
agent.i18n = I18N(language=self.language)
|
140 |
+
|
141 |
+
if self.process == Process.sequential:
|
142 |
+
return self._sequential_loop()
|
143 |
+
else:
|
144 |
+
raise NotImplementedError(
|
145 |
+
f"The process '{self.process}' is not implemented yet."
|
146 |
+
)
|
147 |
+
|
148 |
+
def _sequential_loop(self) -> str:
|
149 |
+
"""Executes tasks sequentially and returns the final output."""
|
150 |
+
task_output = ""
|
151 |
+
for task in self.tasks:
|
152 |
+
self._prepare_and_execute_task(task)
|
153 |
+
task_output = task.execute(task_output)
|
154 |
+
|
155 |
+
role = task.agent.role if task.agent is not None else "None"
|
156 |
+
self._logger.log("debug", f"[{role}] Task output: {task_output}\n\n")
|
157 |
+
|
158 |
+
if self.max_rpm:
|
159 |
+
self._rpm_controller.stop_rpm_counter()
|
160 |
+
|
161 |
+
return task_output
|
162 |
+
|
163 |
+
def _prepare_and_execute_task(self, task: Task) -> None:
|
164 |
+
"""Prepares and logs information about the task being executed.
|
165 |
+
Args:
|
166 |
+
task: The task to be executed.
|
167 |
+
"""
|
168 |
+
if task.agent is not None and task.agent.allow_delegation:
|
169 |
+
task.tools += AgentTools(agents=self.agents).tools()
|
170 |
+
|
171 |
+
role = task.agent.role if task.agent is not None else "None"
|
172 |
+
self._logger.log("debug", f"Working Agent: {role}")
|
173 |
+
self._logger.log("info", f"Starting Task: {task.description}")
|
crewai/process.py
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from enum import Enum
|
2 |
+
|
3 |
+
|
4 |
+
class Process(str, Enum):
|
5 |
+
"""
|
6 |
+
Class representing the different processes that can be used to tackle tasks
|
7 |
+
"""
|
8 |
+
|
9 |
+
sequential = "sequential"
|
10 |
+
# TODO: consensual = 'consensual'
|
11 |
+
# TODO: hierarchical = 'hierarchical'
|
crewai/task.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import uuid
|
2 |
+
from typing import Any, List, Optional
|
3 |
+
|
4 |
+
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
|
5 |
+
from pydantic_core import PydanticCustomError
|
6 |
+
|
7 |
+
from crewai.agent import Agent
|
8 |
+
from crewai.tasks.task_output import TaskOutput
|
9 |
+
from crewai.utilities import I18N
|
10 |
+
|
11 |
+
|
12 |
+
class Task(BaseModel):
|
13 |
+
"""Class that represent a task to be executed."""
|
14 |
+
|
15 |
+
__hash__ = object.__hash__ # type: ignore
|
16 |
+
i18n: I18N = I18N()
|
17 |
+
description: str = Field(description="Description of the actual task.")
|
18 |
+
callback: Optional[Any] = Field(
|
19 |
+
description="Callback to be executed after the task is completed.", default=None
|
20 |
+
)
|
21 |
+
agent: Optional[Agent] = Field(
|
22 |
+
description="Agent responsible for executiong the task.", default=None
|
23 |
+
)
|
24 |
+
expected_output: Optional[str] = Field(
|
25 |
+
description="Clear definition of expected output for the task.",
|
26 |
+
default=None,
|
27 |
+
)
|
28 |
+
output: Optional[TaskOutput] = Field(
|
29 |
+
description="Task output, it's final result after being executed", default=None
|
30 |
+
)
|
31 |
+
tools: List[Any] = Field(
|
32 |
+
default_factory=list,
|
33 |
+
description="Tools the agent is limited to use for this task.",
|
34 |
+
)
|
35 |
+
id: UUID4 = Field(
|
36 |
+
default_factory=uuid.uuid4,
|
37 |
+
frozen=True,
|
38 |
+
description="Unique identifier for the object, not set by user.",
|
39 |
+
)
|
40 |
+
|
41 |
+
@field_validator("id", mode="before")
|
42 |
+
@classmethod
|
43 |
+
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
44 |
+
if v:
|
45 |
+
raise PydanticCustomError(
|
46 |
+
"may_not_set_field", "This field is not to be set by the user.", {}
|
47 |
+
)
|
48 |
+
|
49 |
+
@model_validator(mode="after")
|
50 |
+
def check_tools(self):
|
51 |
+
"""Check if the tools are set."""
|
52 |
+
if not self.tools and self.agent and self.agent.tools:
|
53 |
+
self.tools.extend(self.agent.tools)
|
54 |
+
return self
|
55 |
+
|
56 |
+
def execute(self, context: Optional[str] = None) -> str:
|
57 |
+
"""Execute the task.
|
58 |
+
Returns:
|
59 |
+
Output of the task.
|
60 |
+
"""
|
61 |
+
if not self.agent:
|
62 |
+
raise Exception(
|
63 |
+
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, either consensual or hierarchical."
|
64 |
+
)
|
65 |
+
|
66 |
+
result = self.agent.execute_task(
|
67 |
+
task=self._prompt(), context=context, tools=self.tools
|
68 |
+
)
|
69 |
+
|
70 |
+
self.output = TaskOutput(description=self.description, result=result)
|
71 |
+
self.callback(self.output) if self.callback else None
|
72 |
+
return result
|
73 |
+
|
74 |
+
def _prompt(self) -> str:
|
75 |
+
"""Prompt the task.
|
76 |
+
Returns:
|
77 |
+
Prompt of the task.
|
78 |
+
"""
|
79 |
+
tasks_slices = [self.description]
|
80 |
+
|
81 |
+
if self.expected_output:
|
82 |
+
output = self.i18n.slice("expected_output").format(
|
83 |
+
expected_output=self.expected_output
|
84 |
+
)
|
85 |
+
tasks_slices = [self.description, output]
|
86 |
+
return "\n".join(tasks_slices)
|
crewai/tasks/__init__.py
ADDED
File without changes
|
crewai/tasks/task_output.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Optional
|
2 |
+
|
3 |
+
from pydantic import BaseModel, Field, model_validator
|
4 |
+
|
5 |
+
|
6 |
+
class TaskOutput(BaseModel):
|
7 |
+
"""Class that represents the result of a task."""
|
8 |
+
|
9 |
+
description: str = Field(description="Description of the task")
|
10 |
+
summary: Optional[str] = Field(description="Summary of the task", default=None)
|
11 |
+
result: str = Field(description="Result of the task")
|
12 |
+
|
13 |
+
@model_validator(mode="after")
|
14 |
+
def set_summary(self):
|
15 |
+
excerpt = " ".join(self.description.split(" ")[:10])
|
16 |
+
self.summary = f"{excerpt}..."
|
17 |
+
return self
|
crewai/telemtry/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
from .telemetry import Telemetry
|
crewai/tools/__init__.py
ADDED
File without changes
|
crewai/tools/agent_tools.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import List
|
2 |
+
|
3 |
+
from langchain.tools import Tool
|
4 |
+
from pydantic import BaseModel, Field
|
5 |
+
|
6 |
+
from crewai.agent import Agent
|
7 |
+
from crewai.utilities import I18N
|
8 |
+
|
9 |
+
|
10 |
+
class AgentTools(BaseModel):
|
11 |
+
"""Default tools around agent delegation"""
|
12 |
+
|
13 |
+
agents: List[Agent] = Field(description="List of agents in this crew.")
|
14 |
+
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
15 |
+
|
16 |
+
def tools(self):
|
17 |
+
return [
|
18 |
+
Tool.from_function(
|
19 |
+
func=self.delegate_work,
|
20 |
+
name="Delegate work to co-worker",
|
21 |
+
description=self.i18n.tools("delegate_work").format(
|
22 |
+
coworkers=", ".join([agent.role for agent in self.agents])
|
23 |
+
),
|
24 |
+
),
|
25 |
+
Tool.from_function(
|
26 |
+
func=self.ask_question,
|
27 |
+
name="Ask question to co-worker",
|
28 |
+
description=self.i18n.tools("ask_question").format(
|
29 |
+
coworkers=", ".join([agent.role for agent in self.agents])
|
30 |
+
),
|
31 |
+
),
|
32 |
+
]
|
33 |
+
|
34 |
+
def delegate_work(self, command):
|
35 |
+
"""Useful to delegate a specific task to a coworker."""
|
36 |
+
return self._execute(command)
|
37 |
+
|
38 |
+
def ask_question(self, command):
|
39 |
+
"""Useful to ask a question, opinion or take from a coworker."""
|
40 |
+
return self._execute(command)
|
41 |
+
|
42 |
+
def _execute(self, command):
|
43 |
+
"""Execute the command."""
|
44 |
+
try:
|
45 |
+
agent, task, context = command.split("|")
|
46 |
+
except ValueError:
|
47 |
+
return self.i18n.errors("agent_tool_missing_param")
|
48 |
+
|
49 |
+
if not agent or not task or not context:
|
50 |
+
return self.i18n.errors("agent_tool_missing_param")
|
51 |
+
|
52 |
+
agent = [
|
53 |
+
available_agent
|
54 |
+
for available_agent in self.agents
|
55 |
+
if available_agent.role == agent
|
56 |
+
]
|
57 |
+
|
58 |
+
if not agent:
|
59 |
+
return self.i18n.errors("agent_tool_unexsiting_coworker").format(
|
60 |
+
coworkers=", ".join([agent.role for agent in self.agents])
|
61 |
+
)
|
62 |
+
|
63 |
+
agent = agent[0]
|
64 |
+
return agent.execute_task(task, context)
|
crewai/tools/cache_tools.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.tools import Tool
|
2 |
+
from pydantic import BaseModel, ConfigDict, Field
|
3 |
+
|
4 |
+
from crewai.agents.cache import CacheHandler
|
5 |
+
|
6 |
+
|
7 |
+
class CacheTools(BaseModel):
|
8 |
+
"""Default tools to hit the cache."""
|
9 |
+
|
10 |
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
11 |
+
name: str = "Hit Cache"
|
12 |
+
cache_handler: CacheHandler = Field(
|
13 |
+
description="Cache Handler for the crew",
|
14 |
+
default=CacheHandler(),
|
15 |
+
)
|
16 |
+
|
17 |
+
def tool(self):
|
18 |
+
return Tool.from_function(
|
19 |
+
func=self.hit_cache,
|
20 |
+
name=self.name,
|
21 |
+
description="Reads directly from the cache",
|
22 |
+
)
|
23 |
+
|
24 |
+
def hit_cache(self, key):
|
25 |
+
split = key.split("tool:")
|
26 |
+
tool = split[1].split("|input:")[0].strip()
|
27 |
+
tool_input = split[1].split("|input:")[1].strip()
|
28 |
+
return self.cache_handler.read(tool, tool_input)
|
crewai/tools/gemini_tools.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# tools created using gemini
|
2 |
+
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
|
6 |
+
import google.generativeai as genai
|
7 |
+
from google.api_core import exceptions
|
8 |
+
|
9 |
+
# Retrieve API Key from Environment Variable
|
10 |
+
GOOGLE_AI_STUDIO = os.environ.get('GOOGLE_API_KEY')
|
11 |
+
|
12 |
+
# Ensure the API key is available
|
13 |
+
if not GOOGLE_AI_STUDIO:
|
14 |
+
raise ValueError("API key not found. Please set the GOOGLE_AI_STUDIO2 environment variable.")
|
15 |
+
|
16 |
+
import requests
|
17 |
+
from langchain.tools import tool
|
18 |
+
|
19 |
+
# Rest of your code remains the same
|
20 |
+
genai.configure(api_key=GOOGLE_AI_STUDIO)
|
21 |
+
model = genai.GenerativeModel('gemini-pro')
|
22 |
+
|
23 |
+
class GeminiSearchTools():
|
24 |
+
@tool("Gemini search the internet")
|
25 |
+
def gemini_search(query):
|
26 |
+
"""
|
27 |
+
Searches for content based on the provided query using the Gemini model.
|
28 |
+
Handles DeadlineExceeded exceptions from the Google API.
|
29 |
+
|
30 |
+
Args:
|
31 |
+
query (str): The search query.
|
32 |
+
|
33 |
+
Returns:
|
34 |
+
str: The response text from the Gemini model or an error message.
|
35 |
+
"""
|
36 |
+
try:
|
37 |
+
response = model.generate_content(query)
|
38 |
+
return response.text
|
39 |
+
except exceptions.DeadlineExceeded as e:
|
40 |
+
# Handle the DeadlineExceeded exception here
|
41 |
+
print("Error: Deadline Exceeded -", str(e))
|
42 |
+
# You can return a custom message or take other appropriate actions
|
43 |
+
return "Error: The request timed out. Please try again later."
|
44 |
+
|
45 |
+
|
46 |
+
|
47 |
+
@tool("Gemini search news on the internet")
|
48 |
+
def gemini_search_news(query):
|
49 |
+
"""
|
50 |
+
Searches for content based on the provided query using the Gemini model.
|
51 |
+
Handles DeadlineExceeded exceptions from the Google API.
|
52 |
+
|
53 |
+
Args:
|
54 |
+
query (str): The search query.
|
55 |
+
|
56 |
+
Returns:
|
57 |
+
str: The response text from the Gemini model or an error message.
|
58 |
+
"""
|
59 |
+
try:
|
60 |
+
response = model.generate_content(query)
|
61 |
+
return response.text
|
62 |
+
except exceptions.DeadlineExceeded as e:
|
63 |
+
# Handle the DeadlineExceeded exception here
|
64 |
+
print("Error: Deadline Exceeded -", str(e))
|
65 |
+
# You can return a custom message or take other appropriate actions
|
66 |
+
return "Error: The request timed out. Please try again later."
|
crewai/tools/mixtral_tools.py
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# tools created using Mixtral
|
2 |
+
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
|
6 |
+
from huggingface_hub import InferenceClient
|
7 |
+
import gradio as gr
|
8 |
+
|
9 |
+
client = InferenceClient(
|
10 |
+
"mistralai/Mixtral-8x7B-Instruct-v0.1"
|
11 |
+
)
|
12 |
+
|
13 |
+
# Helper Method
|
14 |
+
|
15 |
+
def format_prompt(message, history):
|
16 |
+
prompt = "<s>"
|
17 |
+
for user_prompt, bot_response in history:
|
18 |
+
prompt += f"[INST] {user_prompt} [/INST]"
|
19 |
+
prompt += f" {bot_response}</s> "
|
20 |
+
prompt += f"[INST] {message} [/INST]"
|
21 |
+
return prompt
|
22 |
+
|
23 |
+
|
24 |
+
import requests
|
25 |
+
from langchain.tools import tool
|
26 |
+
|
27 |
+
history = ""
|
28 |
+
|
29 |
+
class MixtralSearchTools():
|
30 |
+
@tool("Mixtral Normal")
|
31 |
+
def mixtral_normal(prompt, histroy="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
32 |
+
"""
|
33 |
+
Searches for content based on the provided query using the Mixtral model.
|
34 |
+
Args:
|
35 |
+
query (str): The search query.
|
36 |
+
Returns:
|
37 |
+
str: The response text from the Mixtral model or an error message.
|
38 |
+
"""
|
39 |
+
generate_kwargs = {
|
40 |
+
"temperature": temperature,
|
41 |
+
"max_new_tokens": max_new_tokens,
|
42 |
+
"top_p": top_p,
|
43 |
+
"repetition_penalty": repetition_penalty,
|
44 |
+
"do_sample": True,
|
45 |
+
"seed": 42,
|
46 |
+
}
|
47 |
+
|
48 |
+
formatted_prompt = format_prompt(prompt, history)
|
49 |
+
|
50 |
+
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
|
51 |
+
output = ""
|
52 |
+
for response in stream:
|
53 |
+
output += response.token.text
|
54 |
+
yield output
|
55 |
+
return output
|
56 |
+
|
57 |
+
|
58 |
+
@tool("Mixtral Crazy")
|
59 |
+
def mixtral_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
60 |
+
"""
|
61 |
+
Searches for content based on the provided query using the Mixtral model but has the gaurd rails removed,
|
62 |
+
and responses are crazy and off the wall and sometimes scary.
|
63 |
+
Args:
|
64 |
+
query (str): The search query.
|
65 |
+
Returns:
|
66 |
+
str: The response text from the Mixtral model or an error message.
|
67 |
+
"""
|
68 |
+
generate_kwargs = {
|
69 |
+
"temperature": temperature,
|
70 |
+
"max_new_tokens": max_new_tokens,
|
71 |
+
"top_p": top_p,
|
72 |
+
"repetition_penalty": repetition_penalty,
|
73 |
+
"do_sample": True,
|
74 |
+
"seed": 42,
|
75 |
+
}
|
76 |
+
|
77 |
+
stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
|
78 |
+
output = ""
|
79 |
+
for response in stream:
|
80 |
+
output += response.token.text
|
81 |
+
yield output
|
82 |
+
return output
|
crewai/tools/phi2_tools.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# tools created using Phi2
|
2 |
+
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
|
6 |
+
import requests
|
7 |
+
from langchain.tools import tool
|
8 |
+
|
9 |
+
import spaces
|
10 |
+
import torch
|
11 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
|
12 |
+
from threading import Thread
|
13 |
+
device = "cpu"
|
14 |
+
if torch.cuda.is_available():
|
15 |
+
device = "cuda"
|
16 |
+
if torch.backends.mps.is_available():
|
17 |
+
device = "mps"
|
18 |
+
|
19 |
+
|
20 |
+
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
|
21 |
+
model = AutoModelForCausalLM.from_pretrained(
|
22 |
+
"microsoft/phi-2",
|
23 |
+
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
|
24 |
+
trust_remote_code=True,
|
25 |
+
).to(device)
|
26 |
+
|
27 |
+
|
28 |
+
#@spaces.GPU(enable_queue=True)
|
29 |
+
class Phi2SearchTools():
|
30 |
+
@tool("Phi2 Normal")
|
31 |
+
def phi2_search(text, temperature=.75, maxLen=2048):
|
32 |
+
"""
|
33 |
+
Searches for content based on the provided query using the Gemini model.
|
34 |
+
Handles DeadlineExceeded exceptions from the Google API.
|
35 |
+
Args:
|
36 |
+
query (str): The search query.
|
37 |
+
Returns:
|
38 |
+
str: The response text from the Gemini model or an error message.
|
39 |
+
"""
|
40 |
+
inputs = tokenizer([text], return_tensors="pt").to(device)
|
41 |
+
streamer = TextIteratorStreamer(tokenizer)
|
42 |
+
generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=maxLen, temperature=temperature)
|
43 |
+
thread = Thread(target=model.generate, kwargs=generation_kwargs)
|
44 |
+
thread.start()
|
45 |
+
t = ""
|
46 |
+
toks = 0
|
47 |
+
for out in streamer:
|
48 |
+
t += out
|
49 |
+
yield t
|
50 |
+
|
51 |
+
|
52 |
+
|
crewai/tools/zephyr_tools.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# tools created using Zephyr
|
2 |
+
|
3 |
+
import json
|
4 |
+
import os
|
5 |
+
|
6 |
+
from huggingface_hub import InferenceClient
|
7 |
+
import gradio as gr
|
8 |
+
|
9 |
+
client = InferenceClient(
|
10 |
+
"HuggingFaceH4/zephyr-7b-beta"
|
11 |
+
)
|
12 |
+
|
13 |
+
# Helper Method
|
14 |
+
|
15 |
+
def format_prompt(message, history):
|
16 |
+
prompt = "<s>"
|
17 |
+
for user_prompt, bot_response in history:
|
18 |
+
prompt += f"[INST] {user_prompt} [/INST]"
|
19 |
+
prompt += f" {bot_response}</s> "
|
20 |
+
prompt += f"[INST] {message} [/INST]"
|
21 |
+
return prompt
|
22 |
+
|
23 |
+
|
24 |
+
import requests
|
25 |
+
from langchain.tools import tool
|
26 |
+
|
27 |
+
history = ""
|
28 |
+
|
29 |
+
class ZephyrSearchTools():
|
30 |
+
@tool("Zephyr Normal")
|
31 |
+
def zephyr_normal(prompt, histroy="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
32 |
+
"""
|
33 |
+
Searches for content based on the provided query using the Zephyr model.
|
34 |
+
Args:
|
35 |
+
query (str): The search query.
|
36 |
+
Returns:
|
37 |
+
str: The response text from the Zephyr model or an error message.
|
38 |
+
"""
|
39 |
+
generate_kwargs = {
|
40 |
+
"temperature": temperature,
|
41 |
+
"max_new_tokens": max_new_tokens,
|
42 |
+
"top_p": top_p,
|
43 |
+
"repetition_penalty": repetition_penalty,
|
44 |
+
"do_sample": True,
|
45 |
+
"seed": 42,
|
46 |
+
}
|
47 |
+
|
48 |
+
formatted_prompt = format_prompt(prompt, history)
|
49 |
+
|
50 |
+
stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
|
51 |
+
output = ""
|
52 |
+
for response in stream:
|
53 |
+
output += response.token.text
|
54 |
+
yield output
|
55 |
+
return output
|
56 |
+
|
57 |
+
|
58 |
+
@tool("Zephyrl Crazy")
|
59 |
+
def zephyr_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
60 |
+
"""
|
61 |
+
Searches for content based on the provided query using the Zephyr model but has the gaurd rails removed,
|
62 |
+
and responses are crazy and off the wall and sometimes scary.
|
63 |
+
Args:
|
64 |
+
query (str): The search query.
|
65 |
+
Returns:
|
66 |
+
str: The response text from the Zephyr model or an error message.
|
67 |
+
"""
|
68 |
+
generate_kwargs = {
|
69 |
+
"temperature": temperature,
|
70 |
+
"max_new_tokens": max_new_tokens,
|
71 |
+
"top_p": top_p,
|
72 |
+
"repetition_penalty": repetition_penalty,
|
73 |
+
"do_sample": True,
|
74 |
+
"seed": 42,
|
75 |
+
}
|
76 |
+
|
77 |
+
stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
|
78 |
+
output = ""
|
79 |
+
for response in stream:
|
80 |
+
output += response.token.text
|
81 |
+
yield output
|
82 |
+
return output
|
83 |
+
|
84 |
+
|
85 |
+
|
86 |
+
|
crewai/translations/el.json
ADDED
@@ -0,0 +1,26 @@
{
  "hierarchical_manager_agent": {
    "role": "Διευθυντής Ομάδας",
    "goal": "Διαχειρίσου την ομάδα σου για να ολοκληρώσει την εργασία με τον καλύτερο δυνατό τρόπο.",
    "backstory": "Είσαι ένας έμπειρος διευθυντής με την ικανότητα να βγάζεις το καλύτερο από την ομάδα σου.\nΕίσαι επίσης γνωστός για την ικανότητά σου να αναθέτεις εργασίες στους σωστούς ανθρώπους και να κάνεις τις σωστές ερωτήσεις για να πάρεις το καλύτερο από την ομάδα σου.\nΑκόμα κι αν δεν εκτελείς εργασίες μόνος σου, έχεις πολλή εμπειρία στον τομέα, που σου επιτρέπει να αξιολογείς σωστά τη δουλειά των μελών της ομάδας σου."
  },
  "slices": {
    "observation": "\nΠαρατήρηση",
    "task": "Αρχή! Αυτό είναι ΠΟΛΥ σημαντικό για εσάς, η δουλειά σας εξαρτάται από αυτό!\n\nΤρέχουσα εργασία: {input}",
    "memory": "Αυτή είναι η περίληψη της μέχρι τώρα δουλειάς σας:\n{chat_history}",
    "role_playing": "Είσαι {role}.\n{backstory}\n\nΟ προσωπικός σας στόχος είναι: {goal}",
    "tools": "ΕΡΓΑΛΕΙΑ:\n------\nΈχετε πρόσβαση μόνο στα ακόλουθα εργαλεία:\n\n{tools}\n\nΓια να χρησιμοποιήσετε ένα εργαλείο, χρησιμοποιήστε την ακόλουθη ακριβώς μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Ναί\nΔράση: η ενέργεια που πρέπει να γίνει, πρέπει να είναι μία από τις[{tool_names}], μόνο το όνομα.\nΕνέργεια προς εισαγωγή: η είσοδος στη δράση\nΠαρατήρηση: το αποτέλεσμα της δράσης\n```\n\nΌταν έχετε μια απάντηση για την εργασία σας ή εάν δεν χρειάζεται να χρησιμοποιήσετε ένα εργαλείο, ΠΡΕΠΕΙ να χρησιμοποιήσετε τη μορφή:\n\n```\nΣκέψη: Χρειάζεται να χρησιμοποιήσω κάποιο εργαλείο; Οχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
    "task_with_context": "{task}\nΑυτό είναι το πλαίσιο με το οποίο εργάζεστε:\n{context}",
    "expected_output": "Η τελική σας απάντηση πρέπει να είναι: {expected_output}"
  },
  "errors": {
    "force_final_answer": "Στην πραγματικότητα, χρησιμοποίησα πάρα πολλά εργαλεία, οπότε θα σταματήσω τώρα και θα σας δώσω την απόλυτη ΚΑΛΥΤΕΡΗ τελική μου απάντηση ΤΩΡΑ, χρησιμοποιώντας την αναμενόμενη μορφή: ```\nΣκέφτηκα: Χρειάζεται να χρησιμοποιήσω ένα εργαλείο; Όχι\nΤελική απάντηση: [η απάντησή σας εδώ]```",
    "agent_tool_missing_param": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Λείπουν ακριβώς 3 διαχωρισμένες τιμές σωλήνων (|). Για παράδειγμα, `coworker|task|context`. Πρέπει να φροντίσω να περάσω το πλαίσιο ως πλαίσιο.\n",
    "agent_tool_unexsiting_coworker": "\nΣφάλμα κατά την εκτέλεση του εργαλείου. Ο συνάδελφος που αναφέρεται στο Ενέργεια προς εισαγωγή δεν βρέθηκε, πρέπει να είναι μία από τις ακόλουθες επιλογές: {coworkers}.\n",
    "task_repeated_usage": "Μόλις χρησιμοποίησα το {tool} εργαλείο με είσοδο {tool_input}. Άρα ξέρω ήδη το αποτέλεσμα αυτού και δεν χρειάζεται να το χρησιμοποιήσω τώρα.\n"
  },
  "tools": {
    "delegate_work": "Χρήσιμο για την ανάθεση μιας συγκεκριμένης εργασίας σε έναν από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να του ζητήσετε (μία από τις επιλογές), την εργασία και όλο το πραγματικό πλαίσιο που έχετε για την εργασία.\nΓια παράδειγμα, `coworker|task|context`.",
    "ask_question": "Χρήσιμο για να κάνετε μια ερώτηση, γνώμη ή αποδοχή από τους παρακάτω συναδέλφους: {coworkers}.\nΗ είσοδος σε αυτό το εργαλείο θα πρέπει να είναι ένα κείμενο χωρισμένο σε σωλήνα (|) μήκους 3 (τρία), που αντιπροσωπεύει τον συνάδελφο στον οποίο θέλετε να το ρωτήσετε (μία από τις επιλογές), την ερώτηση και όλο το πραγματικό πλαίσιο που έχετε για την ερώτηση.\nΓια παράδειγμα, `coworker|question|context`."
  }
}
crewai/translations/en.json
ADDED
@@ -0,0 +1,26 @@
{
  "hierarchical_manager_agent": {
    "role": "Crew Manager",
    "goal": "Manage the team to complete the task in the best way possible.",
    "backstory": "You are a seasoned manager with a knack for getting the best out of your team.\nYou are also known for your ability to delegate work to the right people, and to ask the right questions to get the best out of your team.\nEven though you don't perform tasks by yourself, you have a lot of experience in the field, which allows you to properly evaluate the work of your team members."
  },
  "slices": {
    "observation": "\nObservation",
    "task": "Begin! This is VERY important to you, your job depends on it!\n\nCurrent Task: {input}",
    "memory": "This is the summary of your work so far:\n{chat_history}",
    "role_playing": "You are {role}.\n{backstory}\n\nYour personal goal is: {goal}",
    "tools": "TOOLS:\n------\nYou have access to only the following tools:\n\n{tools}\n\nTo use a tool, please use the exact following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}], just the name.\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response for your task, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
    "task_with_context": "{task}\nThis is the context you're working with:\n{context}",
    "expected_output": "Your final answer must be: {expected_output}"
  },
  "errors": {
    "force_final_answer": "Actually, I used too many tools, so I'll stop now and give you my absolute BEST Final answer NOW, using the expected format: ```\nThought: Do I need to use a tool? No\nFinal Answer: [your response here]```",
    "agent_tool_missing_param": "\nError executing tool. Missing exactly 3 pipe (|) separated values. For example, `coworker|task|context`. I need to make sure to pass context as context.\n",
    "agent_tool_unexsiting_coworker": "\nError executing tool. Co-worker mentioned in the Action Input not found, it must be one of the following options: {coworkers}.\n",
    "task_repeated_usage": "I just used the {tool} tool with input {tool_input}. So I already know the result of that and don't need to use it now.\n"
  },
  "tools": {
    "delegate_work": "Useful to delegate a specific task to one of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the task and all actual context you have for the task.\nFor example, `coworker|task|context`.",
    "ask_question": "Useful to ask a question, opinion or take from one of the following co-workers: {coworkers}.\nThe input to this tool should be a pipe (|) separated text of length 3 (three), representing the co-worker you want to ask it to (one of the options), the question and all actual context you have for the question.\nFor example, `coworker|question|context`."
  }
}
crewai/utilities/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .i18n import I18N
from .logger import Logger
from .prompts import Prompts
from .rpm_controller import RPMController
crewai/utilities/i18n.py
ADDED
@@ -0,0 +1,51 @@
import json
import os
from typing import Dict, Optional

from pydantic import BaseModel, Field, PrivateAttr, model_validator


class I18N(BaseModel):
    _translations: Dict[str, Dict[str, str]] = PrivateAttr()
    language: Optional[str] = Field(
        default="en",
        description="Language used to load translations",
    )

    @model_validator(mode="after")
    def load_translation(self) -> "I18N":
        """Load translations from a JSON file based on the specified language."""
        try:
            dir_path = os.path.dirname(os.path.realpath(__file__))
            prompts_path = os.path.join(
                dir_path, f"../translations/{self.language}.json"
            )

            with open(prompts_path, "r") as f:
                self._translations = json.load(f)
        # pydantic's ValidationError cannot be constructed from a plain message,
        # so ordinary ValueErrors are raised on these failure paths instead.
        except FileNotFoundError:
            raise ValueError(
                f"Translation file for language '{self.language}' not found."
            )
        except json.JSONDecodeError:
            raise ValueError("Error decoding JSON from the prompts file.")

        if not self._translations:
            self._translations = {}

        return self

    def slice(self, slice: str) -> str:
        return self.retrieve("slices", slice)

    def errors(self, error: str) -> str:
        return self.retrieve("errors", error)

    def tools(self, tool: str) -> str:
        return self.retrieve("tools", tool)

    def retrieve(self, kind, key) -> str:
        try:
            return self._translations[kind][key]
        except KeyError:
            raise ValueError(f"Translation for '{kind}':'{key}' not found.")
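A quick sketch of how these lookups resolve against the translation files above; the expected results follow from the en.json contents shown earlier:

from crewai.utilities import I18N

i18n = I18N(language="en")
i18n.slice("observation")            # "\nObservation"
i18n.errors("task_repeated_usage")   # template with {tool} and {tool_input} placeholders
i18n.tools("delegate_work")          # tool description with a {coworkers} placeholder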
crewai/utilities/logger.py
ADDED
@@ -0,0 +1,11 @@
class Logger:
    def __init__(self, verbose_level=0):
        # verbose_level=True is coerced to level 2 (info); integers are used as-is.
        verbose_level = (
            2 if isinstance(verbose_level, bool) and verbose_level else verbose_level
        )
        self.verbose_level = verbose_level

    def log(self, level, message):
        # A message prints only when its mapped level is at most verbose_level.
        level_map = {"debug": 1, "info": 2}
        if self.verbose_level and level_map.get(level, 0) <= self.verbose_level:
            print(f"[{level.upper()}]: {message}")
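A small usage sketch; note the quirk that, per the mapping above (debug=1, info=2), verbose_level=1 prints debug messages but suppresses info:

from crewai.utilities import Logger

logger = Logger(verbose_level=1)
logger.log("debug", "building prompt")   # printed: 1 <= 1
logger.log("info", "task finished")      # suppressed: 2 > 1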
crewai/utilities/prompts.py
ADDED
@@ -0,0 +1,32 @@
from typing import ClassVar

from langchain.prompts import BasePromptTemplate, PromptTemplate
from pydantic import BaseModel, Field

from crewai.utilities import I18N


class Prompts(BaseModel):
    """Manages and generates prompts for a generic agent with support for different languages."""

    i18n: I18N = Field(default=I18N())

    SCRATCHPAD_SLICE: ClassVar[str] = "\n{agent_scratchpad}"

    def task_execution_with_memory(self) -> BasePromptTemplate:
        """Generate a prompt for task execution with memory components."""
        return self._build_prompt(["role_playing", "tools", "memory", "task"])

    def task_execution_without_tools(self) -> BasePromptTemplate:
        """Generate a prompt for task execution without tools components."""
        return self._build_prompt(["role_playing", "task"])

    def task_execution(self) -> BasePromptTemplate:
        """Generate a standard prompt for task execution."""
        return self._build_prompt(["role_playing", "tools", "task"])

    def _build_prompt(self, components: list[str]) -> BasePromptTemplate:
        """Construct a prompt template by concatenating the requested i18n slices."""
        prompt_parts = [self.i18n.slice(component) for component in components]
        prompt_parts.append(self.SCRATCHPAD_SLICE)
        return PromptTemplate.from_template("".join(prompt_parts))
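For illustration, a sketch of what task_execution() produces when combined with the en.json slices above; the variable names follow from the {placeholders} in those slices:

from crewai.utilities import Prompts

prompts = Prompts()
template = prompts.task_execution()
# Expected input variables from the role_playing/tools/task slices plus the scratchpad:
# role, backstory, goal, tools, tool_names, input, agent_scratchpad
print(sorted(template.input_variables))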
crewai/utilities/rpm_controller.py
ADDED
@@ -0,0 +1,59 @@
import threading
import time
from typing import Union

from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator

from crewai.utilities.logger import Logger


class RPMController(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)
    max_rpm: Union[int, None] = Field(default=None)
    logger: Logger = Field(default=None)
    _current_rpm: int = PrivateAttr(default=0)
    _timer: Union[threading.Timer, None] = PrivateAttr(default=None)
    _lock: Union[threading.Lock, None] = PrivateAttr(default=None)
    _shutdown_flag: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def reset_counter(self):
        if self.max_rpm and not self._shutdown_flag:
            self._lock = threading.Lock()
            self._reset_request_count()
        return self

    def check_or_wait(self):
        # With no limit configured, every request goes through.
        if not self.max_rpm:
            return True

        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            else:
                self.logger.log(
                    "info", "Max RPM reached, waiting for next minute to start."
                )
                self._wait_for_next_minute()
                self._current_rpm = 1
                return True

    def stop_rpm_counter(self):
        if self._timer:
            self._timer.cancel()
            self._timer = None

    def _wait_for_next_minute(self):
        time.sleep(60)
        self._current_rpm = 0

    def _reset_request_count(self):
        # Zero the counter, then re-arm a 60-second timer so the window rolls over.
        with self._lock:
            self._current_rpm = 0
        if self._timer:
            self._shutdown_flag = True
            self._timer.cancel()
        self._timer = threading.Timer(60.0, self._reset_request_count)
        self._timer.start()
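A standalone usage sketch (hypothetical; within the package, Crew and Agent construct and share this controller):

from crewai.utilities import Logger, RPMController

controller = RPMController(max_rpm=10, logger=Logger(verbose_level=2))
for _ in range(12):
    controller.check_or_wait()   # the 11th call within the minute sleeps ~60s first
controller.stop_rpm_counter()    # cancel the re-arm timer so the process can exit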