"""
AI Module
This module provides an AI class that interfaces with language models to perform various tasks such as
starting a conversation, advancing the conversation, and handling message serialization. It also includes
backoff strategies for handling rate limit errors from the OpenAI API.
Classes:
AI: A class that interfaces with language models for conversation management and message serialization.
Functions:
serialize_messages(messages: List[Message]) -> str
Serialize a list of messages to a JSON string.
"""
from __future__ import annotations
import json
import logging
import os
from pathlib import Path
from typing import List, Optional, Union
import backoff
import openai
import pyperclip
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
messages_from_dict,
messages_to_dict,
)
from langchain_anthropic import ChatAnthropic
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from gpt_engineer.core.token_usage import TokenUsageLog
# Type hint for a chat message
Message = Union[AIMessage, HumanMessage, SystemMessage]
# Set up logging
logger = logging.getLogger(__name__)
class AI:
"""
A class that interfaces with language models for conversation management and message serialization.
This class provides methods to start and advance conversations, handle message serialization,
and implement backoff strategies for rate limit errors when interacting with the OpenAI API.
Attributes
----------
temperature : float
The temperature setting for the language model.
azure_endpoint : str
The endpoint URL for the Azure-hosted language model.
model_name : str
The name of the language model to use.
streaming : bool
A flag indicating whether to use streaming for the language model.
llm : BaseChatModel
The language model instance for conversation management.
token_usage_log : TokenUsageLog
A log for tracking token usage during conversations.
Methods
-------
start(system: str, user: str, step_name: str) -> List[Message]
Start the conversation with a system message and a user message.
next(messages: List[Message], prompt: Optional[str], step_name: str) -> List[Message]
Advances the conversation by sending the message history to the LLM and appending the response.
backoff_inference(messages: List[Message]) -> Any
Perform inference using the language model with an exponential backoff strategy.
serialize_messages(messages: List[Message]) -> str
Serialize a list of messages to a JSON string.
deserialize_messages(jsondictstr: str) -> List[Message]
Deserialize a JSON string to a list of messages.
_create_chat_model() -> BaseChatModel
Create a chat model with the specified model name and temperature.
"""
def __init__(
self,
model_name="gpt-4-1106-preview",
temperature=0.1,
azure_endpoint="",
streaming=True,
):
"""
Initialize the AI class.
Parameters
----------
model_name : str, optional
The name of the model to use, by default "gpt-4-1106-preview".
temperature : float, optional
The temperature to use for the model, by default 0.1.
azure_endpoint : str, optional
The endpoint URL for an Azure-hosted model; when set, AzureChatOpenAI is used.
streaming : bool, optional
Whether to stream tokens from the model, by default True.
"""
self.temperature = temperature
self.azure_endpoint = azure_endpoint
self.model_name = model_name
self.streaming = streaming
self.llm = self._create_chat_model()
self.token_usage_log = TokenUsageLog(model_name)
logger.debug(f"Using model {self.model_name}")
def start(self, system: str, user: str, step_name: str) -> List[Message]:
"""
Start the conversation with a system message and a user message.
Parameters
----------
system : str
The content of the system message.
user : str
The content of the user message.
step_name : str
The name of the step.
Returns
-------
List[Message]
The list of messages in the conversation.
"""
messages: List[Message] = [
SystemMessage(content=system),
HumanMessage(content=user),
]
return self.next(messages, step_name=step_name)
def next(
self,
messages: List[Message],
prompt: Optional[str] = None,
*,
step_name: str,
) -> List[Message]:
"""
Advances the conversation by sending the message history
to the LLM and appending the model's response.
Parameters
----------
messages : List[Message]
The list of messages in the conversation.
prompt : Optional[str], optional
The prompt to use, by default None.
step_name : str
The name of the step.
Returns
-------
List[Message]
The updated list of messages in the conversation.
"""
if prompt:
messages.append(HumanMessage(content=prompt))
logger.debug(f"Creating a new chat completion: {messages}")
messages = self._collapse_messages(messages)
response = self.backoff_inference(messages)
self.token_usage_log.update_log(
messages=messages, answer=response.content, step_name=step_name
)
messages.append(response)
logger.debug(f"Chat completion finished: {messages}")
return messages
def _collapse_messages(self, messages: List[Message]):
"""
Combine consecutive messages of the same type into a single message.
This method iterates through the list of messages, combining consecutive messages of the same type
by joining their content with a newline character. This reduces the number of messages and simplifies
the conversation for processing.
Parameters
----------
messages : List[Message]
The list of messages to collapse.
Returns
-------
List[Message]
The list of messages after collapsing consecutive messages of the same type.
"""
collapsed_messages = []
if not messages:
return collapsed_messages
previous_message = messages[0]
combined_content = previous_message.content
for current_message in messages[1:]:
if current_message.type == previous_message.type:
combined_content += "\n\n" + current_message.content
else:
collapsed_messages.append(
previous_message.__class__(content=combined_content)
)
previous_message = current_message
combined_content = current_message.content
collapsed_messages.append(previous_message.__class__(content=combined_content))
return collapsed_messages
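# Illustrative example (added comment, not in the original source) of what
# _collapse_messages does:
#   [HumanMessage("a"), HumanMessage("b"), AIMessage("c")]
# becomes
#   [HumanMessage("a\n\nb"), AIMessage("c")]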
@backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
def backoff_inference(self, messages):
"""
Perform inference using the language model while implementing an exponential backoff strategy.
This function will retry the inference in case of a rate limit error from the OpenAI API.
It uses an exponential backoff strategy, meaning the wait time between retries increases
exponentially. The function will attempt to retry up to 7 times within a span of 45 seconds.
Parameters
----------
messages : List[Message]
A list of chat messages which will be passed to the language model for processing.
Returns
-------
Any
The output from the language model after processing the provided messages.
Raises
------
openai.RateLimitError
If the number of retries exceeds the maximum or if the rate limit persists beyond the
allotted time, the function will ultimately raise a RateLimitError.
Example
-------
>>> messages = [SystemMessage(content="Hello"), HumanMessage(content="How's the weather?")]
>>> response = backoff_inference(messages)
"""
return self.llm.invoke(messages) # type: ignore
@staticmethod
def serialize_messages(messages: List[Message]) -> str:
"""
Serialize a list of messages to a JSON string.
Parameters
----------
messages : List[Message]
The list of messages to serialize.
Returns
-------
str
The serialized messages as a JSON string.
"""
return json.dumps(messages_to_dict(messages))
@staticmethod
def deserialize_messages(jsondictstr: str) -> List[Message]:
"""
Deserialize a JSON string to a list of messages.
Parameters
----------
jsondictstr : str
The JSON string to deserialize.
Returns
-------
List[Message]
The deserialized list of messages.
"""
data = json.loads(jsondictstr)
# Modify implicit is_chunk property to ALWAYS false
# since Langchain's Message schema is stricter
prevalidated_data = [
{**item, "tools": {**item.get("tools", {}), "is_chunk": False}}
for item in data
]
return list(messages_from_dict(prevalidated_data)) # type: ignore
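# Illustrative round trip (added comment, not in the original source):
#   raw = AI.serialize_messages([HumanMessage(content="hi")])
#   AI.deserialize_messages(raw)  # -> [HumanMessage(content="hi")]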
def _create_chat_model(self) -> BaseChatModel:
"""
Create a chat model with the specified model name and temperature.
Parameters
----------
model : str
The name of the model to create.
temperature : float
The temperature to use for the model.
Returns
-------
BaseChatModel
The created chat model.
"""
if self.azure_endpoint:
return AzureChatOpenAI(
azure_endpoint=self.azure_endpoint,
openai_api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"),
deployment_name=self.model_name,
openai_api_type="azure",
streaming=self.streaming,
callbacks=[StreamingStdOutCallbackHandler()],
)
if "claude" in self.model_name:
return ChatAnthropic(
model=self.model_name,
temperature=self.temperature,
callbacks=[StreamingStdOutCallbackHandler()],
max_tokens_to_sample=4096,
)
return ChatOpenAI(
model=self.model_name,
temperature=self.temperature,
streaming=self.streaming,
callbacks=[StreamingStdOutCallbackHandler()],
)
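# Model selection summary (added comment, not in the original source): a non-empty
# azure_endpoint selects AzureChatOpenAI, a model name containing "claude" selects
# ChatAnthropic, and anything else falls back to ChatOpenAI.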
def serialize_messages(messages: List[Message]) -> str:
return AI.serialize_messages(messages)
class ClipboardAI(AI):
# Intentionally does not call the superclass __init__ (no LLM is created)
def __init__(self, **_): # type: ignore
pass
@staticmethod
def serialize_messages(messages: List[Message]) -> str:
return "\n\n".join([f"{m.type}:\n{m.content}" for m in messages])
@staticmethod
def multiline_input():
print("Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.")
content = []
while True:
try:
line = input()
except EOFError:
break
content.append(line)
return "\n".join(content)
def next(
self,
messages: List[Message],
prompt: Optional[str] = None,
*,
step_name: str,
) -> List[Message]:
"""
Not yet fully supported
"""
if prompt:
messages.append(HumanMessage(content=prompt))
logger.debug(f"Creating a new chat completion: {messages}")
msgs = self.serialize_messages(messages)
pyperclip.copy(msgs)
Path("clipboard.txt").write_text(msgs)
print(
"Messages copied to clipboard and written to clipboard.txt,",
len(msgs),
"characters in total",
)
response = self.multiline_input()
messages.append(AIMessage(content=response))
logger.debug(f"Chat completion finished: {messages}")
return messages
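# --- Illustrative usage sketch (not part of the original module) ---
# Requires OPENAI_API_KEY; the model name, prompts and step name are example values only.
if __name__ == "__main__":
    ai = AI(model_name="gpt-4-1106-preview", temperature=0.1)
    conversation = ai.start(
        system="You are a terse assistant.",
        user="Name one prime number.",
        step_name="demo",
    )
    print(conversation[-1].content)
    print(AI.serialize_messages(conversation))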
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import asyncio
from langchain.prompts import PromptTemplate
from server.utils import get_prompt_template
async def completion(query: str = Body(..., description="User input", examples=["恼羞成怒"]),
stream: bool = Body(False, description="Stream the output"),
echo: bool = Body(False, description="Echo the input in addition to the output"),
model_name: str = Body(LLM_MODELS[0], description="Name of the LLM model."),
temperature: float = Body(TEMPERATURE, description="LLM sampling temperature", ge=0.0, le=1.0),
max_tokens: Optional[int] = Body(1024, description="Maximum number of tokens the LLM may generate; None means the model's maximum"),
# top_p: float = Body(TOP_P, description="LLM nucleus sampling; do not set together with temperature", gt=0.0, lt=1.0),
prompt_name: str = Body("default",
description="Name of the prompt template to use (configured in configs/prompt_config.py)"),
):
# TODO: ApiModelWorker handles requests as chat by default and parses params["prompt"] into messages, so corresponding handling is needed when using this endpoint with ApiModelWorker
async def completion_iterator(query: str,
model_name: str = LLM_MODELS[0],
prompt_name: str = prompt_name,
echo: bool = echo,
) -> AsyncIterable[str]:
nonlocal max_tokens
callback = AsyncIteratorCallbackHandler()
if isinstance(max_tokens, int) and max_tokens <= 0:
max_tokens = None
model = get_OpenAI(
model_name=model_name,
temperature=temperature,
max_tokens=max_tokens,
callbacks=[callback],
echo=echo
)
prompt_template = get_prompt_template("completion", prompt_name)
prompt = PromptTemplate.from_template(prompt_template)
chain = LLMChain(prompt=prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"input": query}),
callback.done),
)
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield token
else:
answer = ""
async for token in callback.aiter():
answer += token
yield answer
await task
return EventSourceResponse(completion_iterator(query=query,
model_name=model_name,
prompt_name=prompt_name),
)
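# --- Illustrative usage sketch (not part of the original module) ---
# The route path, FastAPI app and uvicorn runner below are assumptions for the example;
# in the real project this handler is registered by the server's own startup code.
if __name__ == "__main__":
    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    app.post("/other/completion", summary="Plain LLM completion")(completion)
    uvicorn.run(app, host="127.0.0.1", port=7861)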
# -*- coding: utf-8 -*-
import openai
import json
import logging
import sys
import argparse
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain import LLMChain
import numpy as np
import requests
import os
import subprocess
import re
import importlib.util
from sklearn.metrics.pairwise import cosine_similarity
import pickle
from util import *
from tqdm import tqdm
openai.api_key = os.environ["OPENAI_API_KEY"]
def get_last_processed_index(progress_file):
"""Retrieve the last processed index from the progress file."""
if os.path.exists(progress_file):
with open(progress_file, 'r', encoding='utf-8') as f:
last_index = f.read().strip()
return int(last_index) if last_index else 0
else:
return 0
def update_progress(progress_file, index):
"""Update the last processed index in the progress file."""
with open(progress_file, 'w', encoding='utf-8') as f:
f.write(str(index))
def task_decompose(question, Tool_dic, model_name):
chat = ChatOpenAI(model_name=model_name)
template = "You are a helpful assistant."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_message_prompt = HumanMessagePromptTemplate.from_template(
"We have spotify database and the following tools:\n"
"{Tool_dic}"
"You need to decompose a complex user's question into some simple subtasks and let the model execute it step by step with these tools.\n"
"Please note that: \n"
"1. you should break down tasks into appropriate subtasks to use the tools mentioned above.\n"
"2. You should not only list the subtask, but also list the ID of the tool used to solve this subtask.\n"
"3. If you think you do not need to use the tool to solve the subtask, just leave it as {{\"ID\": -1}}\n"
"4. You must consider the logical connections, order and constraints among the tools to achieve a correct tool path."
"5. You must ONLY output the ID of the tool you chose in a parsible JSON format. Two examples output look like:\n"
"'''\n"
"Question: Pause the player"
"Example 1: [{{\"Task\":\"Get information about the user’s current playback state\", \"ID\":15}}, {{\"Task\":\"Pause playback on the user's account\", \"ID\":19}}]\n"
"'''\n"
"This is the user's question: {question}\n"
"Output:"
)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(llm=chat, prompt=chat_prompt)
ind = 0
while True:
try:
result = chain.run(question=question, Tool_dic=Tool_dic)
result = eval(result.split('\n\n')[0])
break
except Exception as e:
print(f"task decompose fails: {e}")
if ind > 10:
return -1
ind += 1
continue
return result
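# Example of the structure task_decompose is expected to return (illustrative only,
# mirroring the JSON format requested in the prompt above):
#   [{"Task": "Get information about the user's current playback state", "ID": 15},
#    {"Task": "Pause playback on the user's account", "ID": 19}]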
def task_execution(
Tool_dic, dic_tool, test_data, progress_file,
start_index, total_files, retrieval_num, ind, model_name):
with tqdm(total=total_files, desc="Processing files", initial=start_index) as pbar:
for i, data in enumerate(test_data[start_index:], start=start_index):
question = data["query"]
print(question)
task_path = task_decompose(question, Tool_dic, model_name)
tool_choice_ls = []
for task in task_path:
if isinstance(task["ID"], list):
for ele in task["ID"]:
tool_choice_ls.append(dic_tool[ele]['tool_usage'])
elif int(task["ID"]) in dic_tool.keys():
tool_choice_ls.append(dic_tool[task["ID"]]['tool_usage'])
ind = ind + 1
with open(f"restbench_{model_name}_Easytool.jsonl", 'a+', encoding='utf-8') as f:
line = json.dumps({
"ID": ind,
"question": question,
"task_path": task_path,
"tool_choice_ls": tool_choice_ls
}, ensure_ascii=False)
f.write(line + '\n')
print(tool_choice_ls)
update_progress(progress_file, i + 1)
pbar.update(1)
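# --- Illustrative driver sketch (not part of the original script) ---
# Requires OPENAI_API_KEY. The file name and the Tool_dic / dic_tool layouts below are
# assumptions inferred from how task_execution indexes them; the real data comes from
# the RestBench setup.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", default="gpt-3.5-turbo")
    args = parser.parse_args()

    # dic_tool maps a tool ID to its metadata; Tool_dic is the textual tool list for the prompt.
    dic_tool = {
        15: {"tool_usage": "GET /me/player (get the current playback state)"},
        19: {"tool_usage": "PUT /me/player/pause (pause playback)"},
    }
    Tool_dic = "\n".join(f"{k}: {v['tool_usage']}" for k, v in dic_tool.items())
    test_data = [{"query": "Pause the player"}]

    progress_file = "progress.txt"
    start_index = get_last_processed_index(progress_file)
    task_execution(Tool_dic, dic_tool, test_data, progress_file, start_index,
                   len(test_data), retrieval_num=5, ind=start_index,
                   model_name=args.model_name)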
from langchain.llms import Ollama

question = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(question)
print(res)
import os
from pathlib import Path
from typing import Union
import cloudpickle
import yaml
from mlflow.exceptions import MlflowException
from mlflow.langchain.utils import (
_BASE_LOAD_KEY,
_CONFIG_LOAD_KEY,
_MODEL_DATA_FOLDER_NAME,
_MODEL_DATA_KEY,
_MODEL_DATA_PKL_FILE_NAME,
_MODEL_DATA_YAML_FILE_NAME,
_MODEL_LOAD_KEY,
_MODEL_TYPE_KEY,
_RUNNABLE_LOAD_KEY,
_UNSUPPORTED_MODEL_ERROR_MESSAGE,
_load_base_lcs,
_load_from_json,
_load_from_pickle,
_load_from_yaml,
_save_base_lcs,
_validate_and_wrap_lc_model,
base_lc_types,
custom_type_to_loader_dict,
lc_runnable_assign_types,
lc_runnable_branch_types,
lc_runnable_with_steps_types,
lc_runnables_types,
picklable_runnable_types,
)
_STEPS_FOLDER_NAME = "steps"
_RUNNABLE_STEPS_FILE_NAME = "steps.yaml"
_BRANCHES_FOLDER_NAME = "branches"
_MAPPER_FOLDER_NAME = "mapper"
_RUNNABLE_BRANCHES_FILE_NAME = "branches.yaml"
_DEFAULT_BRANCH_NAME = "default"
def _load_model_from_config(path, model_config):
from langchain.chains.loading import type_to_loader_dict as chains_type_to_loader_dict
from langchain.llms import get_type_to_cls_dict as llms_get_type_to_cls_dict
try:
from langchain.prompts.loading import type_to_loader_dict as prompts_types
except ImportError:
prompts_types = {"prompt", "few_shot_prompt"}
config_path = os.path.join(path, model_config.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME))
# Load runnables from config file
if config_path.endswith(".yaml"):
config = _load_from_yaml(config_path)
elif config_path.endswith(".json"):
config = _load_from_json(config_path)
else:
raise MlflowException(
f"Cannot load runnable without a config file. Got path {config_path}."
)
_type = config.get("_type")
if _type in chains_type_to_loader_dict:
from langchain.chains.loading import load_chain
return load_chain(config_path)
elif _type in prompts_types:
from langchain.prompts.loading import load_prompt
return load_prompt(config_path)
elif _type in llms_get_type_to_cls_dict():
from langchain.llms.loading import load_llm
return load_llm(config_path)
elif _type in custom_type_to_loader_dict():
return custom_type_to_loader_dict()[_type](config)
raise MlflowException(f"Unsupported type {_type} for loading.")
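# Dispatch example (added comment, not in the original source): a model.yaml whose
# `_type` is "prompt" is routed to langchain.prompts.loading.load_prompt, while an
# "llm_chain" config goes through langchain.chains.loading.load_chain.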
def _load_model_from_path(path: str, model_config=None):
model_load_fn = model_config.get(_MODEL_LOAD_KEY)
if model_load_fn == _RUNNABLE_LOAD_KEY:
return _load_runnables(path, model_config)
if model_load_fn == _BASE_LOAD_KEY:
return _load_base_lcs(path, model_config)
if model_load_fn == _CONFIG_LOAD_KEY:
return _load_model_from_config(path, model_config)
raise MlflowException(f"Unsupported model load key {model_load_fn}")
def _load_runnable_with_steps(file_path: Union[Path, str], model_type: str):
"""Load the model
Args:
file_path: Path to file to load the model from.
model_type: Type of the model to load.
"""
from langchain.schema.runnable import RunnableParallel, RunnableSequence
# Convert file to Path object.
load_path = Path(file_path)
if not load_path.exists() or not load_path.is_dir():
raise MlflowException(
f"File {load_path} must exist and must be a directory "
"in order to load runnable with steps."
)
steps_conf_file = load_path / _RUNNABLE_STEPS_FILE_NAME
if not steps_conf_file.exists():
raise MlflowException(
f"File {steps_conf_file} must exist in order to load runnable with steps."
)
steps_conf = _load_from_yaml(steps_conf_file)
steps_path = load_path / _STEPS_FOLDER_NAME
if not steps_path.exists() or not steps_path.is_dir():
raise MlflowException(
f"Folder {steps_path} must exist and must be a directory "
"in order to load runnable with steps."
)
steps = {}
# ignore hidden files
for step in (f for f in os.listdir(steps_path) if not f.startswith(".")):
config = steps_conf.get(step)
# load model from the folder of the step
runnable = _load_model_from_path(os.path.join(steps_path, step), config)
steps[step] = runnable
if model_type == RunnableSequence.__name__:
steps = [value for _, value in sorted(steps.items(), key=lambda item: int(item[0]))]
return runnable_sequence_from_steps(steps)
if model_type == RunnableParallel.__name__:
return RunnableParallel(steps)
def runnable_sequence_from_steps(steps):
"""Construct a RunnableSequence from steps.
Args:
steps: List of steps to construct the RunnableSequence from.
"""
from langchain.schema.runnable import RunnableSequence
if len(steps) < 2:
raise ValueError(f"RunnableSequence must have at least 2 steps, got {len(steps)}.")
first, *middle, last = steps
return RunnableSequence(first=first, middle=middle, last=last)
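# Illustrative example (added comment, not in the original source):
#   runnable_sequence_from_steps([prompt, llm, parser]) rebuilds
#   RunnableSequence(first=prompt, middle=[llm], last=parser)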
def _load_runnable_branch(file_path: Union[Path, str]):
"""Load the model
Args:
file_path: Path to file to load the model from.
"""
from langchain.schema.runnable import RunnableBranch
# Convert file to Path object.
load_path = Path(file_path)
if not load_path.exists() or not load_path.is_dir():
raise MlflowException(
f"File {load_path} must exist and must be a directory "
"in order to load runnable with steps."
)
branches_conf_file = load_path / _RUNNABLE_BRANCHES_FILE_NAME
if not branches_conf_file.exists():
raise MlflowException(
f"File {branches_conf_file} must exist in order to load runnable with steps."
)
branches_conf = _load_from_yaml(branches_conf_file)
branches_path = load_path / _BRANCHES_FOLDER_NAME
if not branches_path.exists() or not branches_path.is_dir():
raise MlflowException(
f"Folder {branches_path} must exist and must be a directory "
"in order to load runnable with steps."
)
branches = []
for branch in os.listdir(branches_path):
# load model from the folder of the branch
if branch == _DEFAULT_BRANCH_NAME:
default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
default = _load_model_from_path(
default_branch_path, branches_conf.get(_DEFAULT_BRANCH_NAME)
)
else:
branch_tuple = []
for i in range(2):
config = branches_conf.get(f"{branch}-{i}")
runnable = _load_model_from_path(
os.path.join(branches_path, branch, str(i)), config
)
branch_tuple.append(runnable)
branches.append(tuple(branch_tuple))
# default branch must be the last branch
branches.append(default)
return RunnableBranch(*branches)
def _load_runnable_assign(file_path: Union[Path, str]):
"""Load the model
Args:
file_path: Path to file to load the model from.
"""
from langchain.schema.runnable.passthrough import RunnableAssign
# Convert file to Path object.
load_path = Path(file_path)
if not load_path.exists() or not load_path.is_dir():
raise MlflowException(
f"File {load_path} must exist and must be a directory in order to load runnable."
)
mapper_file = load_path / _MAPPER_FOLDER_NAME
if not mapper_file.exists() or not mapper_file.is_dir():
raise MlflowException(
f"Folder {mapper_file} must exist and must be a directory "
"in order to load runnable assign with mapper."
)
mapper = _load_runnable_with_steps(mapper_file, "RunnableParallel")
return RunnableAssign(mapper)
def _save_internal_runnables(runnable, path, loader_fn, persist_dir):
conf = {}
if isinstance(runnable, lc_runnables_types()):
conf[_MODEL_TYPE_KEY] = runnable.__class__.__name__
conf.update(_save_runnables(runnable, path, loader_fn, persist_dir))
elif isinstance(runnable, base_lc_types()):
lc_model = _validate_and_wrap_lc_model(runnable, loader_fn)
conf[_MODEL_TYPE_KEY] = lc_model.__class__.__name__
conf.update(_save_base_lcs(lc_model, path, loader_fn, persist_dir))
else:
conf = {
_MODEL_TYPE_KEY: runnable.__class__.__name__,
_MODEL_DATA_KEY: _MODEL_DATA_YAML_FILE_NAME,
_MODEL_LOAD_KEY: _CONFIG_LOAD_KEY,
}
path = path / _MODEL_DATA_YAML_FILE_NAME
# Save some simple runnables that langchain natively supports.
if hasattr(runnable, "save"):
runnable.save(path)
# TODO: check if `dict` is enough to load it back
elif hasattr(runnable, "dict"):
runnable_dict = runnable.dict()
with open(path, "w") as f:
yaml.dump(runnable_dict, f, default_flow_style=False)
else:
return
return conf
def _save_runnable_with_steps(model, file_path: Union[Path, str], loader_fn=None, persist_dir=None):
"""Save the model with steps. Currently it supports saving RunnableSequence and
RunnableParallel.
If saving a RunnableSequence, steps is a list of Runnable objects. We save each step to the
subfolder named by the step index.
e.g. - model
- steps
- 0
- model.yaml
- 1
- model.pkl
- steps.yaml
If saving a RunnableParallel, steps is a dictionary of key-Runnable pairs. We save each step to
the subfolder named by the key.
e.g. - model
- steps
- context
- model.yaml
- question
- model.pkl
- steps.yaml
We save steps.yaml file to the model folder. It contains each step's model's configuration.
Args:
model: Runnable to be saved.
file_path: Path to file to save the model to.
"""
# Convert file to Path object.
save_path = Path(file_path)
save_path.mkdir(parents=True, exist_ok=True)
# Save steps into a folder
steps_path = save_path / _STEPS_FOLDER_NAME
steps_path.mkdir()
steps = model.steps
if isinstance(steps, list):
generator = enumerate(steps)
elif isinstance(steps, dict):
generator = steps.items()
else:
raise MlflowException(
f"Runnable {model} steps attribute must be either a list or a dictionary. "
f"Got {type(steps).__name__}."
)
unsaved_runnables = {}
steps_conf = {}
for key, runnable in generator:
step = str(key)
# Save each step into a subfolder named by step
save_runnable_path = steps_path / step
save_runnable_path.mkdir()
if result := _save_internal_runnables(runnable, save_runnable_path, loader_fn, persist_dir):
steps_conf[step] = result
else:
unsaved_runnables[step] = str(runnable)
if unsaved_runnables:
raise MlflowException(
f"Failed to save runnable sequence: {unsaved_runnables}. "
"Runnable must have either `save` or `dict` method."
)
# save steps configs
with save_path.joinpath(_RUNNABLE_STEPS_FILE_NAME).open("w") as f:
yaml.dump(steps_conf, f, default_flow_style=False)
def _save_runnable_branch(model, file_path, loader_fn, persist_dir):
"""
Save a RunnableBranch to the given path.
"""
save_path = Path(file_path)
save_path.mkdir(parents=True, exist_ok=True)
# save branches into a folder
branches_path = save_path / _BRANCHES_FOLDER_NAME
branches_path.mkdir()
unsaved_runnables = {}
branches_conf = {}
for index, branch_tuple in enumerate(model.branches):
# Save each branch into a subfolder named by index
# and save condition and runnable into subfolder
for i, runnable in enumerate(branch_tuple):
save_runnable_path = branches_path / str(index) / str(i)
save_runnable_path.mkdir(parents=True)
branches_conf[f"{index}-{i}"] = {}
if result := _save_internal_runnables(
runnable, save_runnable_path, loader_fn, persist_dir
):
branches_conf[f"{index}-{i}"] = result
else:
unsaved_runnables[f"{index}-{i}"] = str(runnable)
# save default branch
default_branch_path = branches_path / _DEFAULT_BRANCH_NAME
default_branch_path.mkdir()
if result := _save_internal_runnables(
model.default, default_branch_path, loader_fn, persist_dir
):
branches_conf[_DEFAULT_BRANCH_NAME] = result
else:
unsaved_runnables[_DEFAULT_BRANCH_NAME] = str(model.default)
if unsaved_runnables:
raise MlflowException(
f"Failed to save runnable branch: {unsaved_runnables}. "
"Runnable must have either `save` or `dict` method."
)
# save branches configs
with save_path.joinpath(_RUNNABLE_BRANCHES_FILE_NAME).open("w") as f:
yaml.dump(branches_conf, f, default_flow_style=False)
def _save_runnable_assign(model, file_path, loader_fn=None, persist_dir=None):
from langchain.schema.runnable import RunnableParallel
save_path = Path(file_path)
save_path.mkdir(parents=True, exist_ok=True)
# save mapper into a folder
mapper_path = save_path / _MAPPER_FOLDER_NAME
mapper_path.mkdir()
if not isinstance(model.mapper, RunnableParallel):
raise MlflowException(
f"Failed to save model {model} with type {model.__class__.__name__}. "
"RunnableAssign's mapper must be a RunnableParallel."
)
_save_runnable_with_steps(model.mapper, mapper_path, loader_fn, persist_dir)
def _save_picklable_runnable(model, path):
if not path.endswith(".pkl"):
raise ValueError(f"File path must end with .pkl, got {path}.")
with open(path, "wb") as f:
cloudpickle.dump(model, f)
def _save_runnables(model, path, loader_fn=None, persist_dir=None):
model_data_kwargs = {_MODEL_LOAD_KEY: _RUNNABLE_LOAD_KEY}
if isinstance(model, lc_runnable_with_steps_types()):
model_data_path = _MODEL_DATA_FOLDER_NAME
_save_runnable_with_steps(
model, os.path.join(path, model_data_path), loader_fn, persist_dir
)
elif isinstance(model, picklable_runnable_types()):
model_data_path = _MODEL_DATA_PKL_FILE_NAME
_save_picklable_runnable(model, os.path.join(path, model_data_path))
elif isinstance(model, lc_runnable_branch_types()):
model_data_path = _MODEL_DATA_FOLDER_NAME
_save_runnable_branch(model, os.path.join(path, model_data_path), loader_fn, persist_dir)
elif isinstance(model, lc_runnable_assign_types()):
model_data_path = _MODEL_DATA_FOLDER_NAME
_save_runnable_assign(model, os.path.join(path, model_data_path), loader_fn, persist_dir)
else:
raise MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=type(model).__name__)
)
model_data_kwargs.update({_MODEL_DATA_KEY: model_data_path})
return model_data_kwargs
def _load_runnables(path, conf):
model_type = conf.get(_MODEL_TYPE_KEY)
model_data = conf.get(_MODEL_DATA_KEY, _MODEL_DATA_YAML_FILE_NAME)
if model_type in (x.__name__ for x in lc_runnable_with_steps_types()):
return _load_runnable_with_steps(os.path.join(path, model_data), model_type)
if (
model_type in (x.__name__ for x in picklable_runnable_types())
or model_data == _MODEL_DATA_PKL_FILE_NAME
):
return _load_from_pickle(os.path.join(path, model_data))
if model_type in (x.__name__ for x in lc_runnable_branch_types()):
return _load_runnable_branch(os.path.join(path, model_data))
if model_type in (x.__name__ for x in lc_runnable_assign_types()):
return _load_runnable_assign(os.path.join(path, model_data))
raise MlflowException.invalid_parameter_value(
_UNSUPPORTED_MODEL_ERROR_MESSAGE.format(instance_type=model_type)
)
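# --- Illustrative round-trip sketch (not part of the original module) ---
# Exercises only the low-level pickle helpers defined above; the RunnableLambda,
# the temp directory and the assumption that _MODEL_DATA_PKL_FILE_NAME ends in
# ".pkl" are for demonstration only.
if __name__ == "__main__":
    import tempfile

    from langchain.schema.runnable import RunnableLambda

    doubler = RunnableLambda(lambda x: x * 2)
    with tempfile.TemporaryDirectory() as tmp:
        pkl_path = os.path.join(tmp, _MODEL_DATA_PKL_FILE_NAME)
        _save_picklable_runnable(doubler, pkl_path)
        restored = _load_from_pickle(pkl_path)
        assert restored.invoke(3) == 6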
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
if start_index != -1 and end_index != -1:
extracted_content = json_string[start_index + len("```json"):end_index].strip()
# Parse the JSON string into a Python dictionary
parsed = json.loads(extracted_content)
elif start_index != -1 and end_index == -1 and json_string.endswith("``"):
end_index = json_string.find("``", start_index + len("```json"))
extracted_content = json_string[start_index + len("```json"):end_index].strip()
# Parse the JSON string into a Python dictionary
parsed = json.loads(extracted_content)
elif json_string.startswith("{"):
# Parse the JSON string into a Python dictionary
parsed = json.loads(json_string)
else:
raise Exception("Could not find JSON block in the output.")
return parsed
def parse_and_check_json_markdown(text: str, expected_keys: list[str]) -> dict:
try:
json_obj = parse_json_markdown(text)
except json.JSONDecodeError as e:
raise OutputParserException(f"Got invalid JSON object. Error: {e}")
for key in expected_keys:
if key not in json_obj:
raise OutputParserException(
f"Got invalid return object. Expected key `{key}` "
f"to be present, but got {json_obj}"
)
return json_obj
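
# Illustrative usage sketch: a minimal, hypothetical check of the helpers above
# on a model response that wraps its JSON answer in a fenced ```json block.
if __name__ == "__main__":
    sample_response = '```json\n{"command": "search", "args": {"query": "latest news"}}\n```'
    result = parse_and_check_json_markdown(sample_response, expected_keys=["command", "args"])
    print(result["command"])  # expected: "search"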
| [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid JSON object. Error: {e}"""'], {}), "(f'Got invalid JSON object. Error: {e}')\n", (1343, 1383), False, 'from langchain.schema import OutputParserException\n'), ((1464, 1581), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Got invalid return object. Expected key `{key}` to be present, but got {json_obj}"""'], {}), "(\n f'Got invalid return object. Expected key `{key}` to be present, but got {json_obj}'\n )\n", (1485, 1581), False, 'from langchain.schema import OutputParserException\n'), ((1013, 1036), 'json.loads', 'json.loads', (['json_string'], {}), '(json_string)\n', (1023, 1036), False, 'import json\n')] |
import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from pydantic import (
UUID4,
BaseModel,
ConfigDict,
Field,
InstanceOf,
PrivateAttr,
field_validator,
model_validator,
)
from pydantic_core import PydanticCustomError
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
class Agent(BaseModel):
"""Represents an agent in a system.
Each agent has a role, a goal, a backstory, and an optional language model (llm).
The agent can also have memory, can operate in verbose mode, and can delegate tasks to other agents.
Attributes:
agent_executor: An instance of the CrewAgentExecutor class.
role: The role of the agent.
goal: The objective of the agent.
backstory: The backstory of the agent.
config: Dict representation of agent configuration.
llm: The language model that will run the agent.
            function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
max_iter: Maximum number of iterations for an agent to execute a task.
memory: Whether the agent should have memory or not.
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
            tools: Tools at the agent's disposal.
step_callback: Callback to be executed after each step of the agent execution.
callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
"""
__hash__ = object.__hash__ # type: ignore
_logger: Logger = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr(default=None)
_request_within_rpm_limit: Any = PrivateAttr(default=None)
_token_process: TokenProcess = TokenProcess()
formatting_errors: int = 0
model_config = ConfigDict(arbitrary_types_allowed=True)
id: UUID4 = Field(
default_factory=uuid.uuid4,
frozen=True,
description="Unique identifier for the object, not set by user.",
)
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
config: Optional[Dict[str, Any]] = Field(
description="Configuration for the agent",
default=None,
)
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the agent execution to be respected.",
)
memory: bool = Field(
default=False, description="Whether the agent should have memory or not"
)
verbose: bool = Field(
default=False, description="Verbose mode for the Agent Execution"
)
allow_delegation: bool = Field(
default=True, description="Allow delegation of tasks to agents"
)
tools: Optional[List[Any]] = Field(
default_factory=list, description="Tools at agents disposal"
)
max_iter: Optional[int] = Field(
default=15, description="Maximum iterations for an agent to execute a task"
)
agent_executor: InstanceOf[CrewAgentExecutor] = Field(
default=None, description="An instance of the CrewAgentExecutor class."
)
tools_handler: InstanceOf[ToolsHandler] = Field(
default=None, description="An instance of the ToolsHandler class."
)
cache_handler: InstanceOf[CacheHandler] = Field(
default=CacheHandler(), description="An instance of the CacheHandler class."
)
step_callback: Optional[Any] = Field(
default=None,
description="Callback to be executed after each step of the agent execution.",
)
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
llm: Any = Field(
default_factory=lambda: ChatOpenAI(
model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4")
),
description="Language model that will run the agent.",
)
function_calling_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None
)
callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
default=None, description="Callback to be executed"
)
def __init__(__pydantic_self__, **data):
config = data.pop("config", {})
super().__init__(**config, **data)
@field_validator("id", mode="before")
@classmethod
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
if v:
raise PydanticCustomError(
"may_not_set_field", "This field is not to be set by the user.", {}
)
@model_validator(mode="after")
def set_attributes_based_on_config(self) -> "Agent":
"""Set attributes based on the agent configuration."""
if self.config:
for key, value in self.config.items():
setattr(self, key, value)
return self
@model_validator(mode="after")
def set_private_attrs(self):
"""Set private attributes."""
self._logger = Logger(self.verbose)
if self.max_rpm and not self._rpm_controller:
self._rpm_controller = RPMController(
max_rpm=self.max_rpm, logger=self._logger
)
return self
@model_validator(mode="after")
def set_agent_executor(self) -> "Agent":
"""set agent executor is set."""
if hasattr(self.llm, "model_name"):
self.llm.callbacks = [
TokenCalcHandler(self.llm.model_name, self._token_process)
]
if not self.agent_executor:
self.set_cache_handler(self.cache_handler)
return self
def execute_task(
self,
task: Any,
context: Optional[str] = None,
tools: Optional[List[Any]] = None,
) -> str:
"""Execute a task with the agent.
Args:
task: Task to execute.
context: Context to execute the task in.
tools: Tools to use for the task.
Returns:
Output of the agent
"""
self.tools_handler.last_used_tool = {}
task_prompt = task.prompt()
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
)
tools = self._parse_tools(tools or self.tools)
self.create_agent_executor(tools=tools)
self.agent_executor.tools = tools
self.agent_executor.task = task
self.agent_executor.tools_description = render_text_description(tools)
self.agent_executor.tools_names = self.__tools_names(tools)
result = self.agent_executor.invoke(
{
"input": task_prompt,
"tool_names": self.agent_executor.tools_names,
"tools": self.agent_executor.tools_description,
}
)["output"]
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return result
def set_cache_handler(self, cache_handler: CacheHandler) -> None:
"""Set the cache handler for the agent.
Args:
cache_handler: An instance of the CacheHandler class.
"""
self.cache_handler = cache_handler
self.tools_handler = ToolsHandler(cache=self.cache_handler)
self.create_agent_executor()
def set_rpm_controller(self, rpm_controller: RPMController) -> None:
"""Set the rpm controller for the agent.
Args:
rpm_controller: An instance of the RPMController class.
"""
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.create_agent_executor()
def create_agent_executor(self, tools=None) -> None:
"""Create an agent executor for the agent.
Returns:
An instance of the CrewAgentExecutor class.
"""
tools = tools or self.tools
agent_args = {
"input": lambda x: x["input"],
"tools": lambda x: x["tools"],
"tool_names": lambda x: x["tool_names"],
"agent_scratchpad": lambda x: self.format_log_to_str(
x["intermediate_steps"]
),
}
executor_args = {
"llm": self.llm,
"i18n": self.i18n,
"tools": self._parse_tools(tools),
"verbose": self.verbose,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"step_callback": self.step_callback,
"tools_handler": self.tools_handler,
"function_calling_llm": self.function_calling_llm,
"callbacks": self.callbacks,
}
if self._rpm_controller:
executor_args[
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
if self.memory:
summary_memory = ConversationSummaryMemory(
llm=self.llm, input_key="input", memory_key="chat_history"
)
executor_args["memory"] = summary_memory
agent_args["chat_history"] = lambda x: x["chat_history"]
prompt = Prompts(i18n=self.i18n, tools=tools).task_execution_with_memory()
else:
prompt = Prompts(i18n=self.i18n, tools=tools).task_execution()
execution_prompt = prompt.partial(
goal=self.goal,
role=self.role,
backstory=self.backstory,
)
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
self.agent_executor = CrewAgentExecutor(
agent=RunnableAgent(runnable=inner_agent), **executor_args
)
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Interpolate inputs into the agent description and backstory."""
if inputs:
self.role = self.role.format(**inputs)
self.goal = self.goal.format(**inputs)
self.backstory = self.backstory.format(**inputs)
def increment_formatting_errors(self) -> None:
"""Count the formatting errors of the agent."""
self.formatting_errors += 1
def format_log_to_str(
self,
intermediate_steps: List[Tuple[AgentAction, str]],
observation_prefix: str = "Observation: ",
llm_prefix: str = "",
) -> str:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
return thoughts
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
"""Parse tools to be used for the task."""
        # Tentatively import BaseTool from crewai_tools; fall back to using the tools as-is if the package is unavailable.
tools_list = []
try:
from crewai_tools import BaseTool as CrewAITool
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_langchain())
else:
tools_list.append(tool)
except ModuleNotFoundError:
for tool in tools:
tools_list.append(tool)
return tools_list
@staticmethod
def __tools_names(tools) -> str:
return ", ".join([t.name for t in tools])
def __repr__(self):
return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
| [
"langchain.tools.render.render_text_description",
"langchain.agents.agent.RunnableAgent",
"langchain.memory.ConversationSummaryMemory"
] | [((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2454, 2468), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2506, 2531), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (2517, 2531), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2567, 2581), 'crewai.utilities.token_counter_callback.TokenProcess', 'TokenProcess', ([], {}), '()\n', (2579, 2581), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((2633, 2673), 'pydantic.ConfigDict', 'ConfigDict', ([], {'arbitrary_types_allowed': '(True)'}), '(arbitrary_types_allowed=True)\n', (2643, 2673), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2690, 2807), 'pydantic.Field', 'Field', ([], {'default_factory': 'uuid.uuid4', 'frozen': '(True)', 'description': '"""Unique identifier for the object, not set by user."""'}), "(default_factory=uuid.uuid4, frozen=True, description=\n 'Unique identifier for the object, not set by user.')\n", (2695, 2807), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2850, 2888), 'pydantic.Field', 'Field', ([], {'description': '"""Role of the agent"""'}), "(description='Role of the agent')\n", (2855, 2888), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2905, 2948), 'pydantic.Field', 'Field', ([], {'description': '"""Objective of the agent"""'}), "(description='Objective of the agent')\n", (2910, 2948), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2970, 3013), 'pydantic.Field', 'Field', ([], {'description': '"""Backstory of the agent"""'}), "(description='Backstory of the agent')\n", (2975, 3013), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3053, 3115), 'pydantic.Field', 'Field', ([], {'description': '"""Configuration for the agent"""', 'default': 'None'}), "(description='Configuration for the agent', default=None)\n", (3058, 3115), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3168, 3291), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Maximum number of requests per minute for the agent execution to be respected."""'}), "(default=None, description=\n 'Maximum number of requests per minute for the agent execution to be respected.'\n )\n", (3173, 3291), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3324, 3403), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the agent should have memory or not"""'}), "(default=False, description='Whether the agent should have memory or not')\n", (3329, 3403), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, 
PrivateAttr, field_validator, model_validator\n'), ((3438, 3510), 'pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Verbose mode for the Agent Execution"""'}), "(default=False, description='Verbose mode for the Agent Execution')\n", (3443, 3510), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3554, 3624), 'pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Allow delegation of tasks to agents"""'}), "(default=True, description='Allow delegation of tasks to agents')\n", (3559, 3624), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3672, 3739), 'pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Tools at agents disposal"""'}), "(default_factory=list, description='Tools at agents disposal')\n", (3677, 3739), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3784, 3871), 'pydantic.Field', 'Field', ([], {'default': '(15)', 'description': '"""Maximum iterations for an agent to execute a task"""'}), "(default=15, description=\n 'Maximum iterations for an agent to execute a task')\n", (3789, 3871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((3933, 4011), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the CrewAgentExecutor class."""'}), "(default=None, description='An instance of the CrewAgentExecutor class.')\n", (3938, 4011), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4072, 4145), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""An instance of the ToolsHandler class."""'}), "(default=None, description='An instance of the ToolsHandler class.')\n", (4077, 4145), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4339, 4442), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed after each step of the agent execution."""'}), "(default=None, description=\n 'Callback to be executed after each step of the agent execution.')\n", (4344, 4442), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4797, 4871), 'pydantic.Field', 'Field', ([], {'description': '"""Language model that will run the agent."""', 'default': 'None'}), "(description='Language model that will run the agent.', default=None)\n", (4802, 4871), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((4951, 5009), 'pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Callback to be executed"""'}), "(default=None, description='Callback to be executed')\n", (4956, 5009), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5159, 5195), 'pydantic.field_validator', 'field_validator', (['"""id"""'], {'mode': '"""before"""'}), "('id', mode='before')\n", (5174, 5195), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5430, 5459), 'pydantic.model_validator', 
'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5445, 5459), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5723, 5752), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (5738, 5752), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((6070, 6099), 'pydantic.model_validator', 'model_validator', ([], {'mode': '"""after"""'}), "(mode='after')\n", (6085, 6099), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((5847, 5867), 'crewai.utilities.Logger', 'Logger', (['self.verbose'], {}), '(self.verbose)\n', (5853, 5867), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((7345, 7375), 'langchain.tools.render.render_text_description', 'render_text_description', (['tools'], {}), '(tools)\n', (7368, 7375), False, 'from langchain.tools.render import render_text_description\n'), ((8088, 8126), 'crewai.agents.ToolsHandler', 'ToolsHandler', ([], {'cache': 'self.cache_handler'}), '(cache=self.cache_handler)\n', (8100, 8126), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4229, 4243), 'crewai.agents.CacheHandler', 'CacheHandler', ([], {}), '()\n', (4241, 4243), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((4492, 4498), 'crewai.utilities.I18N', 'I18N', ([], {}), '()\n', (4496, 4498), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((5305, 5397), 'pydantic_core.PydanticCustomError', 'PydanticCustomError', (['"""may_not_set_field"""', '"""This field is not to be set by the user."""', '{}'], {}), "('may_not_set_field',\n 'This field is not to be set by the user.', {})\n", (5324, 5397), False, 'from pydantic_core import PydanticCustomError\n'), ((5957, 6013), 'crewai.utilities.RPMController', 'RPMController', ([], {'max_rpm': 'self.max_rpm', 'logger': 'self._logger'}), '(max_rpm=self.max_rpm, logger=self._logger)\n', (5970, 6013), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((9715, 9805), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {'llm': 'self.llm', 'input_key': '"""input"""', 'memory_key': '"""chat_history"""'}), "(llm=self.llm, input_key='input', memory_key=\n 'chat_history')\n", (9740, 9805), False, 'from langchain.memory import ConversationSummaryMemory\n'), ((10407, 10434), 'crewai.agents.CrewAgentParser', 'CrewAgentParser', ([], {'agent': 'self'}), '(agent=self)\n', (10422, 10434), False, 'from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler\n'), ((6281, 6339), 'crewai.utilities.token_counter_callback.TokenCalcHandler', 'TokenCalcHandler', (['self.llm.model_name', 'self._token_process'], {}), '(self.llm.model_name, self._token_process)\n', (6297, 6339), False, 'from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess\n'), ((10502, 10537), 'langchain.agents.agent.RunnableAgent', 'RunnableAgent', ([], {'runnable': 'inner_agent'}), '(runnable=inner_agent)\n', (10515, 10537), False, 'from langchain.agents.agent import RunnableAgent\n'), ((9974, 10010), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (9981, 10010), False, 
'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((10075, 10111), 'crewai.utilities.Prompts', 'Prompts', ([], {'i18n': 'self.i18n', 'tools': 'tools'}), '(i18n=self.i18n, tools=tools)\n', (10082, 10111), False, 'from crewai.utilities import I18N, Logger, Prompts, RPMController\n'), ((4630, 4674), 'os.environ.get', 'os.environ.get', (['"""OPENAI_MODEL_NAME"""', '"""gpt-4"""'], {}), "('OPENAI_MODEL_NAME', 'gpt-4')\n", (4644, 4674), False, 'import os\n')] |
import os
import logging
import hashlib
import PyPDF2
from tqdm import tqdm
from modules.presets import *
from modules.utils import *
from modules.config import local_embedding
def get_documents(file_src):
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
text_splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=30)
documents = []
logging.debug("Loading documents...")
logging.debug(f"file_src: {file_src}")
for file in file_src:
filepath = file.name
filename = os.path.basename(filepath)
file_type = os.path.splitext(filename)[1]
logging.info(f"loading file: {filename}")
texts = None
try:
if file_type == ".pdf":
logging.debug("Loading PDF...")
try:
from modules.pdf_func import parse_pdf
from modules.config import advance_docs
two_column = advance_docs["pdf"].get("two_column", False)
pdftext = parse_pdf(filepath, two_column).text
except:
pdftext = ""
with open(filepath, "rb") as pdfFileObj:
pdfReader = PyPDF2.PdfReader(pdfFileObj)
for page in tqdm(pdfReader.pages):
pdftext += page.extract_text()
texts = [Document(page_content=pdftext,
metadata={"source": filepath})]
elif file_type == ".docx":
logging.debug("Loading Word...")
from langchain.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(filepath)
texts = loader.load()
elif file_type == ".pptx":
logging.debug("Loading PowerPoint...")
from langchain.document_loaders import UnstructuredPowerPointLoader
loader = UnstructuredPowerPointLoader(filepath)
texts = loader.load()
elif file_type == ".epub":
logging.debug("Loading EPUB...")
from langchain.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(filepath)
texts = loader.load()
elif file_type == ".xlsx":
logging.debug("Loading Excel...")
text_list = excel_to_string(filepath)
texts = []
for elem in text_list:
texts.append(Document(page_content=elem,
metadata={"source": filepath}))
elif file_type in [".jpg", ".jpeg", ".png", ".heif", ".heic", ".webp", ".bmp", ".gif", ".tiff", ".tif"]:
raise gr.Warning(i18n("不支持的文件: ") + filename + i18n(",请使用 .pdf, .docx, .pptx, .epub, .xlsx 等文档。"))
else:
logging.debug("Loading text file...")
from langchain.document_loaders import TextLoader
loader = TextLoader(filepath, "utf8")
texts = loader.load()
except Exception as e:
import traceback
logging.error(f"Error loading file: {filename}")
traceback.print_exc()
if texts is not None:
texts = text_splitter.split_documents(texts)
documents.extend(texts)
logging.debug("Documents loaded.")
return documents
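
# Note: get_documents expects gradio-style upload objects that expose a file
# path via ``.name``; for a quick local test, a stand-in such as
# ``types.SimpleNamespace(name="docs/report.pdf")`` (hypothetical path) is enough.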
def construct_index(
api_key,
file_src,
max_input_size=4096,
num_outputs=5,
max_chunk_overlap=20,
chunk_size_limit=600,
embedding_limit=None,
separator=" ",
load_from_cache_if_possible=True,
):
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import FAISS
if api_key:
os.environ["OPENAI_API_KEY"] = api_key
else:
        # Due to a silly design choice in one of the dependencies, an API key must always be present here
os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
logging.debug(f"api base: {os.environ.get('OPENAI_API_BASE', None)}")
chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
embedding_limit = None if embedding_limit == 0 else embedding_limit
separator = " " if separator == "" else separator
index_name = get_file_hash(file_src)
index_path = f"./index/{index_name}"
if local_embedding:
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/distiluse-base-multilingual-cased-v2")
else:
from langchain.embeddings import OpenAIEmbeddings
if os.environ.get("OPENAI_API_TYPE", "openai") == "openai":
embeddings = OpenAIEmbeddings(openai_api_base=os.environ.get(
"OPENAI_API_BASE", None), openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key))
else:
embeddings = OpenAIEmbeddings(deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"], openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
model=os.environ["AZURE_EMBEDDING_MODEL_NAME"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"], openai_api_type="azure")
if os.path.exists(index_path) and load_from_cache_if_possible:
logging.info(i18n("找到了缓存的索引文件,加载中……"))
return FAISS.load_local(index_path, embeddings)
else:
documents = get_documents(file_src)
logging.debug(i18n("构建索引中……"))
if documents:
with retrieve_proxy():
index = FAISS.from_documents(documents, embeddings)
else:
raise Exception(i18n("没有找到任何支持的文档。"))
logging.debug(i18n("索引构建完成!"))
os.makedirs("./index", exist_ok=True)
index.save_local(index_path)
logging.debug(i18n("索引已保存至本地!"))
return index
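
# Illustrative usage sketch: builds an index over one (hypothetical) uploaded
# file and runs a similarity search on it. A valid OpenAI key, or
# local_embedding set to True, is assumed for the embedding step.
if __name__ == "__main__":
    from types import SimpleNamespace
    uploaded = SimpleNamespace(name="./docs/survey.pdf")
    index = construct_index(os.environ.get("OPENAI_API_KEY", ""), [uploaded])
    print(index.similarity_search("What is the main contribution?", k=3))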
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain.document_loaders.UnstructuredWordDocumentLoader",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.UnstructuredPowerPointLoader",
"langchain.schema.Document",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.vectorstores.FAISS.load_local",
"langchain.document_loaders.TextLoader",
"langchain.document_loaders.UnstructuredEPubLoader",
"langchain.text_splitter.TokenTextSplitter"
] | [((330, 381), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(30)'}), '(chunk_size=500, chunk_overlap=30)\n', (347, 381), False, 'from langchain.text_splitter import TokenTextSplitter\n'), ((406, 443), 'logging.debug', 'logging.debug', (['"""Loading documents..."""'], {}), "('Loading documents...')\n", (419, 443), False, 'import logging\n'), ((448, 486), 'logging.debug', 'logging.debug', (['f"""file_src: {file_src}"""'], {}), "(f'file_src: {file_src}')\n", (461, 486), False, 'import logging\n'), ((3415, 3449), 'logging.debug', 'logging.debug', (['"""Documents loaded."""'], {}), "('Documents loaded.')\n", (3428, 3449), False, 'import logging\n'), ((561, 587), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (577, 587), False, 'import os\n'), ((646, 687), 'logging.info', 'logging.info', (['f"""loading file: {filename}"""'], {}), "(f'loading file: {filename}')\n", (658, 687), False, 'import logging\n'), ((4440, 4539), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/distiluse-base-multilingual-cased-v2"""'}), "(model_name=\n 'sentence-transformers/distiluse-base-multilingual-cased-v2')\n", (4461, 4539), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((5212, 5238), 'os.path.exists', 'os.path.exists', (['index_path'], {}), '(index_path)\n', (5226, 5238), False, 'import os\n'), ((5334, 5374), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['index_path', 'embeddings'], {}), '(index_path, embeddings)\n', (5350, 5374), False, 'from langchain.vectorstores import FAISS\n'), ((5704, 5741), 'os.makedirs', 'os.makedirs', (['"""./index"""'], {'exist_ok': '(True)'}), "('./index', exist_ok=True)\n", (5715, 5741), False, 'import os\n'), ((608, 634), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (624, 634), False, 'import os\n'), ((4627, 4670), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_TYPE"""', '"""openai"""'], {}), "('OPENAI_API_TYPE', 'openai')\n", (4641, 4670), False, 'import os\n'), ((4907, 5176), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': "os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME']", 'openai_api_key': "os.environ['AZURE_OPENAI_API_KEY']", 'model': "os.environ['AZURE_EMBEDDING_MODEL_NAME']", 'openai_api_base': "os.environ['AZURE_OPENAI_API_BASE_URL']", 'openai_api_type': '"""azure"""'}), "(deployment=os.environ['AZURE_EMBEDDING_DEPLOYMENT_NAME'],\n openai_api_key=os.environ['AZURE_OPENAI_API_KEY'], model=os.environ[\n 'AZURE_EMBEDDING_MODEL_NAME'], openai_api_base=os.environ[\n 'AZURE_OPENAI_API_BASE_URL'], openai_api_type='azure')\n", (4923, 5176), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((774, 805), 'logging.debug', 'logging.debug', (['"""Loading PDF..."""'], {}), "('Loading PDF...')\n", (787, 805), False, 'import logging\n'), ((3204, 3252), 'logging.error', 'logging.error', (['f"""Error loading file: {filename}"""'], {}), "(f'Error loading file: {filename}')\n", (3217, 3252), False, 'import logging\n'), ((3265, 3286), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3284, 3286), False, 'import traceback\n'), ((3993, 4032), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4007, 4032), False, 'import os\n'), ((5549, 5592), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', 
(['documents', 'embeddings'], {}), '(documents, embeddings)\n', (5569, 5592), False, 'from langchain.vectorstores import FAISS\n'), ((1418, 1479), 'langchain.schema.Document', 'Document', ([], {'page_content': 'pdftext', 'metadata': "{'source': filepath}"}), "(page_content=pdftext, metadata={'source': filepath})\n", (1426, 1479), False, 'from langchain.schema import Document\n'), ((1570, 1602), 'logging.debug', 'logging.debug', (['"""Loading Word..."""'], {}), "('Loading Word...')\n", (1583, 1602), False, 'import logging\n'), ((1714, 1754), 'langchain.document_loaders.UnstructuredWordDocumentLoader', 'UnstructuredWordDocumentLoader', (['filepath'], {}), '(filepath)\n', (1744, 1754), False, 'from langchain.document_loaders import UnstructuredWordDocumentLoader\n'), ((4742, 4781), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', 'None'], {}), "('OPENAI_API_BASE', None)\n", (4756, 4781), False, 'import os\n'), ((4815, 4866), 'os.environ.get', 'os.environ.get', (['"""OPENAI_EMBEDDING_API_KEY"""', 'api_key'], {}), "('OPENAI_EMBEDDING_API_KEY', api_key)\n", (4829, 4866), False, 'import os\n'), ((1055, 1086), 'modules.pdf_func.parse_pdf', 'parse_pdf', (['filepath', 'two_column'], {}), '(filepath, two_column)\n', (1064, 1086), False, 'from modules.pdf_func import parse_pdf\n'), ((1848, 1886), 'logging.debug', 'logging.debug', (['"""Loading PowerPoint..."""'], {}), "('Loading PowerPoint...')\n", (1861, 1886), False, 'import logging\n'), ((1996, 2034), 'langchain.document_loaders.UnstructuredPowerPointLoader', 'UnstructuredPowerPointLoader', (['filepath'], {}), '(filepath)\n', (2024, 2034), False, 'from langchain.document_loaders import UnstructuredPowerPointLoader\n'), ((1246, 1274), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (1262, 1274), False, 'import PyPDF2\n'), ((1311, 1332), 'tqdm.tqdm', 'tqdm', (['pdfReader.pages'], {}), '(pdfReader.pages)\n', (1315, 1332), False, 'from tqdm import tqdm\n'), ((2128, 2160), 'logging.debug', 'logging.debug', (['"""Loading EPUB..."""'], {}), "('Loading EPUB...')\n", (2141, 2160), False, 'import logging\n'), ((2264, 2296), 'langchain.document_loaders.UnstructuredEPubLoader', 'UnstructuredEPubLoader', (['filepath'], {}), '(filepath)\n', (2286, 2296), False, 'from langchain.document_loaders import UnstructuredEPubLoader\n'), ((2390, 2423), 'logging.debug', 'logging.debug', (['"""Loading Excel..."""'], {}), "('Loading Excel...')\n", (2403, 2423), False, 'import logging\n'), ((2936, 2973), 'logging.debug', 'logging.debug', (['"""Loading text file..."""'], {}), "('Loading text file...')\n", (2949, 2973), False, 'import logging\n'), ((3065, 3093), 'langchain.document_loaders.TextLoader', 'TextLoader', (['filepath', '"""utf8"""'], {}), "(filepath, 'utf8')\n", (3075, 3093), False, 'from langchain.document_loaders import TextLoader\n'), ((2577, 2635), 'langchain.schema.Document', 'Document', ([], {'page_content': 'elem', 'metadata': "{'source': filepath}"}), "(page_content=elem, metadata={'source': filepath})\n", (2585, 2635), False, 'from langchain.schema import Document\n')] |
import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answer
Thought: you should always think about what to do
Action: Exactly only one word out of: {tool_names}
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question"""
FORMAT_INSTRUCTIONS = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you. For example, the user might ask: What is the latest news?
Here is an example sequence you can follow:
Thought: I should search online for the latest news.
Action: Search
Action Input: What is the latest news?
Observation: X is going away. Z is again happening.
Thought: That is interesting, I should search for more information about X and Z and also search about Q.
Action: Search
Action Input: How is X impacting things. Why is Z happening again, and what are the consequences?
Observation: X is causing Y. Z may be caused by P and will lead to H.
Thought: I now know the final answer
Final Answer: The latest news is:
* X is going away, and this is caused by Y.
* Z is happening again, and the cause is P and will lead to H.
Overall, X and Z are important problems.
"""
FORMAT_INSTRUCTIONS_PYTHON = """List of tools, use exactly one word when choosing Action: {tool_names}
Only the user asks a question, not you. For example, the user might ask: How many rows are in the dataset?
Here is an example sequence you can follow. You can repeat Thoughts, but as soon as possible you should try to answer the original user question. Once you can answer the user question, just say: Thought: I now know the final answer
Thought: I should use python_repl_ast tool.
Action: python_repl_ast
Action Input: df.shape
Observation: (25, 10)
Thought: I now know the final answer
Final Answer: There are 25 rows in the dataset.
"""
FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action:' after 'Thought:"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = (
"Invalid Format: Missing 'Action Input:' after 'Action:'"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (
"Parsing LLM output produced both a final answer and a parse-able action:"
)
class H2OMRKLOutputParser(MRKLOutputParser):
"""MRKL Output parser for the chat agent."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
)
action_match = re.search(regex, text, re.DOTALL)
if includes_answer:
return AgentFinish(
{"output": text.split(FINAL_ANSWER_ACTION)[-1].strip()}, text
)
elif action_match:
action = action_match.group(1).strip()
action_input = action_match.group(2)
tool_input = action_input.strip(" ")
# ensure if its a well formed SQL query we don't remove any trailing " chars
if tool_input.startswith("SELECT ") is False:
tool_input = tool_input.strip('"')
return AgentAction(action, tool_input, text)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
f"Could not parse LLM output: `{text}`",
observation=MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
llm_output=text,
send_to_llm=True,
)
else:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
@property
def _type(self) -> str:
return "mrkl"
class H2OPythonMRKLOutputParser(H2OMRKLOutputParser):
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS_PYTHON
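
# Illustrative usage sketch: how the parser turns a ReAct-style completion into
# an AgentAction and how a final answer is recognised; the sample texts are
# made up for this illustration.
if __name__ == "__main__":
    parser = H2OMRKLOutputParser()
    step = parser.parse(
        "Thought: I should search online for the latest news.\n"
        "Action: Search\n"
        "Action Input: What is the latest news?"
    )
    print(step.tool, "|", step.tool_input)  # Search | What is the latest news?
    final = parser.parse("Thought: I now know the final answer\nFinal Answer: Nothing new today.")
    print(final.return_values["output"])  # Nothing new today.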
| [
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DOTALL)\n", (3698, 3749), False, 'import re\n'), ((3766, 3928), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (3787, 3928), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((3635, 3672), 'langchain.schema.AgentAction', 'AgentAction', (['action', 'tool_input', 'text'], {}), '(action, tool_input, text)\n', (3646, 3672), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4016, 4103), 're.search', 're.search', (['"""[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)"""', 'text', 're.DOTALL'], {}), "('[\\\\s]*Action\\\\s*\\\\d*\\\\s*Input\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*)', text, re.\n DOTALL)\n", (4025, 4103), False, 'import re\n'), ((4133, 4300), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {'observation': 'MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE', 'llm_output': 'text', 'send_to_llm': '(True)'}), "(f'Could not parse LLM output: `{text}`', observation=\n MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, llm_output=text,\n send_to_llm=True)\n", (4154, 4300), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n'), ((4403, 4465), 'langchain.schema.OutputParserException', 'OutputParserException', (['f"""Could not parse LLM output: `{text}`"""'], {}), "(f'Could not parse LLM output: `{text}`')\n", (4424, 4465), False, 'from langchain.schema import AgentAction, AgentFinish, OutputParserException\n')] |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
ServiceUnavailableError,
)
from reworkd_platform.schemas.agent import ModelSettings
from reworkd_platform.web.api.errors import OpenAIError
T = TypeVar("T")
def parse_with_handling(parser: BaseOutputParser[T], completion: str) -> T:
try:
return parser.parse(completion)
except OutputParserException as e:
raise OpenAIError(
e, "There was an issue parsing the response from the AI model."
)
async def openai_error_handler(
func: Callable[..., Any], *args: Any, settings: ModelSettings, **kwargs: Any
) -> Any:
try:
return await func(*args, **kwargs)
except ServiceUnavailableError as e:
raise OpenAIError(
e,
"OpenAI is experiencing issues. Visit "
"https://status.openai.com/ for more info.",
should_log=not settings.custom_api_key,
)
except InvalidRequestError as e:
if e.user_message.startswith("The model:"):
raise OpenAIError(
e,
f"Your API key does not have access to your current model. Please use a different model.",
should_log=not settings.custom_api_key,
)
raise OpenAIError(e, e.user_message)
except AuthenticationError as e:
raise OpenAIError(
e,
"Authentication error: Ensure a valid API key is being used.",
should_log=not settings.custom_api_key,
)
except RateLimitError as e:
if e.user_message.startswith("You exceeded your current quota"):
raise OpenAIError(
e,
f"Your API key exceeded your current quota, please check your plan and billing details.",
should_log=not settings.custom_api_key,
)
raise OpenAIError(e, e.user_message)
except Exception as e:
raise OpenAIError(
e, "There was an unexpected issue getting a response from the AI model."
)
async def call_model_with_handling(
model: BaseChatModel,
prompt: BasePromptTemplate,
args: Dict[str, str],
settings: ModelSettings,
**kwargs: Any,
) -> str:
chain = LLMChain(llm=model, prompt=prompt)
return await openai_error_handler(chain.arun, args, settings=settings, **kwargs)
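
# Illustrative usage sketch: parse_with_handling wraps a LangChain output parser
# so parse failures surface as OpenAIError (call_model_with_handling is awaited
# the same way from async code); the parser and input below are placeholders.
if __name__ == "__main__":
    from langchain.output_parsers import CommaSeparatedListOutputParser

    items = parse_with_handling(CommaSeparatedListOutputParser(), "alpha, beta, gamma")
    print(items)  # ['alpha', 'beta', 'gamma']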
| [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BasePromptTemplate, LLMChain\n'), ((662, 738), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an issue parsing the response from the AI model."""'], {}), "(e, 'There was an issue parsing the response from the AI model.')\n", (673, 738), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((993, 1138), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""OpenAI is experiencing issues. Visit https://status.openai.com/ for more info."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'OpenAI is experiencing issues. Visit https://status.openai.com/ for more info.'\n , should_log=not settings.custom_api_key)\n", (1004, 1138), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1522, 1552), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (1533, 1552), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1604, 1729), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""Authentication error: Ensure a valid API key is being used."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n 'Authentication error: Ensure a valid API key is being used.',\n should_log=not settings.custom_api_key)\n", (1615, 1729), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2114, 2144), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'e.user_message'], {}), '(e, e.user_message)\n', (2125, 2144), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((2186, 2275), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', '"""There was an unexpected issue getting a response from the AI model."""'], {}), "(e,\n 'There was an unexpected issue getting a response from the AI model.')\n", (2197, 2275), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1299, 1453), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key does not have access to your current model. Please use a different model."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key does not have access to your current model. Please use a different model.'\n , should_log=not settings.custom_api_key)\n", (1310, 1453), False, 'from reworkd_platform.web.api.errors import OpenAIError\n'), ((1892, 2045), 'reworkd_platform.web.api.errors.OpenAIError', 'OpenAIError', (['e', 'f"""Your API key exceeded your current quota, please check your plan and billing details."""'], {'should_log': '(not settings.custom_api_key)'}), "(e,\n f'Your API key exceeded your current quota, please check your plan and billing details.'\n , should_log=not settings.custom_api_key)\n", (1903, 2045), False, 'from reworkd_platform.web.api.errors import OpenAIError\n')] |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies
from utils.gpt_interaction import GPTModel
from utils.prompts import SYSTEM
from utils.embeddings import EMBEDDINGS
from utils.gpt_interaction import get_gpt_responses
TOTAL_TOKENS = 0
TOTAL_PROMPTS_TOKENS = 0
TOTAL_COMPLETION_TOKENS = 0
def log_usage(usage, generating_target, print_out=True):
global TOTAL_TOKENS
global TOTAL_PROMPTS_TOKENS
global TOTAL_COMPLETION_TOKENS
prompts_tokens = usage['prompt_tokens']
completion_tokens = usage['completion_tokens']
total_tokens = usage['total_tokens']
TOTAL_TOKENS += total_tokens
TOTAL_PROMPTS_TOKENS += prompts_tokens
TOTAL_COMPLETION_TOKENS += completion_tokens
message = f">>USAGE>> For generating {generating_target}, {total_tokens} tokens have been used " \
f"({prompts_tokens} for prompts; {completion_tokens} for completion). " \
f"{TOTAL_TOKENS} tokens have been used in total."
if print_out:
print(message)
logging.info(message)
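
# Note: ``usage`` is expected to be an OpenAI-style usage dict, so a call such as
#   log_usage({"prompt_tokens": 120, "completion_tokens": 80, "total_tokens": 200}, "keywords")
# adds 200 tokens to TOTAL_TOKENS and logs the cumulative totals.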
def _generation_setup(title, template="Default",
                      tldr=False, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,  # generating references
                      knowledge_database=None, max_tokens_kd=2048, query_counts=10):
    llm = GPTModel(model="gpt-3.5-turbo-16k")
    bibtex_path, destination_folder = copy_templates(template, title)
    logging.basicConfig(level=logging.INFO, filename=os.path.join(destination_folder, "generation.log"))
    # generate keywords
    keywords, usage = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
    log_usage(usage, "keywords")
    keywords = {keyword: max_kw_refs for keyword in keywords}
    print("Keywords: \n", keywords)
    # generate references
    ref = References(title, bib_refs)
    ref.collect_papers(keywords, tldr=tldr)
    references = ref.to_prompts(max_tokens=max_tokens_ref)
    all_paper_ids = ref.to_bibtex(bibtex_path)
    # produce domain knowledge
    prompts = f"Title: {title}"
    preliminaries_kw, _ = llm(systems=SYSTEM["preliminaries"], prompts=prompts)
    # check whether the knowledge database exists
    db_path = f"utils/knowledge_databases/{knowledge_database}"
    db_config_path = os.path.join(db_path, "db_meta.json")
    db_index_path = os.path.join(db_path, "faiss_index")
    domain_knowledge = ""  # default when no knowledge database is available
    if os.path.isdir(db_path):
        try:
            with open(db_config_path, "r", encoding="utf-8") as f:
                db_config = json.load(f)
            model_name = db_config["embedding_model"]
            embeddings = EMBEDDINGS[model_name]
            db = FAISS.load_local(db_index_path, embeddings)
            knowledge = Knowledge(db=db)
            knowledge.collect_knowledge(preliminaries_kw, max_query=query_counts)
            domain_knowledge = knowledge.to_prompts(max_tokens_kd)
        except Exception as e:
            logging.warning(f"Failed to load the knowledge database: {e}")
            domain_knowledge = ""
    prompts = f"Title: {title}"
    system_prompt = "You are an assistant designed to propose the necessary components of a survey paper. Your response should follow the JSON format."
    components, usage = llm(systems=system_prompt, prompts=prompts, return_json=True)
    log_usage(usage, "components")
    print(f"The paper information has been initialized. References are saved to {bibtex_path}.")
    paper = {}
    paper["title"] = title
    paper["references"] = references
    paper["bibtex"] = bibtex_path
    paper["components"] = components
    paper["domain_knowledge"] = domain_knowledge
    return paper, destination_folder, all_paper_ids
def section_generation(paper, section, save_to_path, model, research_field="machine learning"):
    """
    The main pipeline for generating a section:
    1. Generate prompts.
    2. Get responses from the AI assistant.
    3. Extract the section text.
    4. Save the text to a .tex file.
    :return: usage
    """
    title = paper["title"]
    references = paper["references"]
    components = paper['components']
    instruction = '- Discuss three to five main related fields to this paper. For each field, select five to ten key publications from references. For each reference, analyze its strengths and weaknesses in one or two sentences. Present the related works in a logical manner, often chronologically. Consider using a taxonomy or categorization to structure the discussion. Do not use \section{...} or \subsection{...}; use \paragraph{...} to list related fields.'
    fundamental_subprompt = "Your task is to write the {section} section of the paper with the title '{title}'. This paper has the following content: {components}\n"
    instruction_subprompt = "\n" \
                            "Your response should follow the following instructions:\n" \
                            "{instruction}\n"
    ref_instruction_subprompt = "- Read references. " \
                                "Every time you use information from the references, you need to appropriately cite it (using \citep or \citet)." \
                                "For example of \citep, the sentence where you use information from lei2022adaptive \citep{{lei2022adaptive}}. " \
                                "For example of \citet, \citet{{lei2022adaptive}} claims some information.\n" \
                                "- Avoid citing the same reference in a same paragraph.\n" \
                                "\n" \
                                "References:\n" \
                                "{references}"
    output_subprompt = "Ensure that it can be directly compiled by LaTeX."
    review_prompts = PromptTemplate(
        input_variables=["title", "components", "instruction", "section", "references"],
        template=fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt + output_subprompt)
    prompts = review_prompts.format(title=title,
                                    components=components,
                                    instruction=instruction,
                                    section=section,
                                    references=references)
    SECTION_GENERATION_SYSTEM = PromptTemplate(input_variables=["research_field"],
                                                template="You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.")
    output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts,
                                      model=model, temperature=0.4)
    output = output[25:]  # NOTE: drops the first 25 characters of the model output, presumably a fixed boilerplate prefix
    tex_file = os.path.join(save_to_path, f"{section}.tex")
    with open(tex_file, "w", encoding="utf-8") as f:
        f.write(output)
    use_md = True
    use_chinese = True
    if use_md:
        system_md = 'You are a translator between LaTeX and Markdown. Here is a LaTeX file where the content is: \n \n ' + output
        prompts_md = 'You should transfer the LaTeX content to the Markdown format carefully, and pay attention to the correctness of the citation format (use the number). You should directly output the new content without any other reply. You should add the reference papers at the end of the paper, and add line breaks between two reference papers. The title should be ' + paper['title']
        output_md, usage_md = get_gpt_responses(system_md, prompts_md,
                                                model=model, temperature=0.4)
        md_file = os.path.join(save_to_path, "survey.md")
        with open(md_file, "w", encoding="utf-8") as m:
            m.write(output_md)
    if use_chinese:
        system_md_chi = 'You are a translator between English and Chinese. Here is an English file where the content is: \n \n ' + output
        prompts_md_chi = 'You should translate the English content to Chinese and not change anything else. You should directly output the new content without any other reply. You should keep the reference papers unchanged.'
        output_md_chi, usage_md_chi = get_gpt_responses(system_md_chi, prompts_md_chi,
                                                        model=model, temperature=0.4)
        md_file_chi = os.path.join(save_to_path, "survey_chinese.md")
        with open(md_file_chi, "w", encoding="utf-8") as c:
            c.write(output_md_chi)
    return usage
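# generate_draft: end-to-end pipeline; set up references and domain knowledge, generate the requested section,
# run the LaTeX post-processing step, and package the result into a .zip archive.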
def generate_draft(title, tldr=True, max_kw_refs=20, bib_refs=None, max_tokens_ref=2048,
                   knowledge_database=None, max_tokens_kd=2048, query_counts=10,
                   section='related works', model="gpt-3.5-turbo-16k", template="Default",
                   save_zip=None):
    print("================START================")
    paper, destination_folder, _ = _generation_setup(title, template, tldr, max_kw_refs, bib_refs,
                                                     max_tokens_ref=max_tokens_ref, max_tokens_kd=max_tokens_kd,
                                                     query_counts=query_counts,
                                                     knowledge_database=knowledge_database)
    # main components
    print("================PROCESSING================")
    usage = section_generation(paper, section, destination_folder, model=model)
    log_usage(usage, section)
    create_copies(destination_folder)
    print("\nPROCESSING COMPLETE\n")
    print("Draft has been generated in " + destination_folder)
    return make_archive(destination_folder, title + ".zip")
if __name__ == "__main__":
    import openai
    openai.api_key = "your key"
    openai.api_base = 'https://api.openai.com/v1'
    # openai.proxy = "socks5h://localhost:7890"  # uncomment if a proxy/VPN is needed
    target_title = "Reinforcement Learning for Robot Control"
    generate_draft(target_title, knowledge_database="ml_textbook_test", max_kw_refs=20)
| [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GPTModel\n'), ((1626, 1657), 'utils.file_operations.copy_templates', 'copy_templates', (['template', 'title'], {}), '(template, title)\n', (1640, 1657), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((2042, 2069), 'utils.references.References', 'References', (['title', 'bib_refs'], {}), '(title, bib_refs)\n', (2052, 2069), False, 'from utils.references import References\n'), ((5824, 6030), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['title', 'components', 'instruction', 'section', 'references']", 'template': '(fundamental_subprompt + instruction_subprompt + ref_instruction_subprompt +\n output_subprompt)'}), "(input_variables=['title', 'components', 'instruction',\n 'section', 'references'], template=fundamental_subprompt +\n instruction_subprompt + ref_instruction_subprompt + output_subprompt)\n", (5838, 6030), False, 'from langchain import PromptTemplate\n'), ((6353, 6526), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['research_field']", 'template': '"""You are an assistant designed to write academic papers in the field of {research_field} using LaTeX."""'}), "(input_variables=['research_field'], template=\n 'You are an assistant designed to write academic papers in the field of {research_field} using LaTeX.'\n )\n", (6367, 6526), False, 'from langchain import PromptTemplate\n'), ((9472, 9505), 'utils.tex_processing.create_copies', 'create_copies', (['destination_folder'], {}), '(destination_folder)\n', (9485, 9505), False, 'from utils.tex_processing import create_copies\n'), ((9554, 9602), 'utils.file_operations.make_archive', 'make_archive', (['destination_folder', "(title + '.zip')"], {}), "(destination_folder, title + '.zip')\n", (9566, 9602), False, 'from utils.file_operations import make_archive, copy_templates\n'), ((7503, 7573), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md', 'prompts_md'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md, prompts_md, model=model, temperature=0.4)\n', (7520, 7573), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2856, 2899), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['db_index_path', 'embeddings'], {}), '(db_index_path, embeddings)\n', (2872, 2899), False, 'from langchain.vectorstores import FAISS\n'), ((2924, 2940), 'utils.knowledge.Knowledge', 'Knowledge', ([], {'db': 'db'}), '(db=db)\n', (2933, 2940), False, 'from utils.knowledge import Knowledge\n'), ((8211, 8289), 'utils.gpt_interaction.get_gpt_responses', 'get_gpt_responses', (['system_md_chi', 'prompts_md_chi'], {'model': 'model', 'temperature': '(0.4)'}), '(system_md_chi, prompts_md_chi, model=model, temperature=0.4)\n', (8228, 8289), False, 'from utils.gpt_interaction import get_gpt_responses\n'), ((2724, 2736), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2733, 2736), False, 'import json\n')] |