from langchain_openai.chat_models import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain.tools.render import format_tool_to_openai_function
from langgraph.prebuilt import ToolExecutor, ToolInvocation
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage
from langchain.tools import ShellTool
import json
import os
import gradio as gr
os.environ["LANGCHAIN_TRACING_V2"] ="True" | |
os.environ["LANGCHAIN_API_KEY"]="ls__54e16f70b2b0455aad0f2cbf47777d30" | |
os.environ["OPENAI_API_KEY"]="20a79668d6113e99b35fcd541c65bfeaec497b8262c111bd328ef5f1ad8c6335" | |
# os.environ["OPENAI_API_KEY"]="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" | |
os.environ["LANGCHAIN_ENDPOINT"]="https://api.smith.langchain.com" | |
os.environ["LANGCHAIN_PROJECT"]="default" | |
os.environ['TAVILY_API_KEY'] = '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' | |
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
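# Note: `operator.add` is the reducer for this state key, so a node that
# returns {"messages": [msg]} appends msg to the running list rather than
# replacing it. That is what lets the agent/tool loop below accumulate a
# transcript across iterations.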
model = ChatOpenAI(model="gpt-3.5-turbo-1106")  # reads OPENAI_API_KEY from the environment
shell_tool = ShellTool()
tools = [TavilySearchResults(max_results=1), shell_tool]
functions = [format_tool_to_openai_function(t) for t in tools]
model = model.bind_functions(functions)
tool_executor = ToolExecutor(tools)
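# Optional sanity check (a minimal sketch; the command below is illustrative):
# a ToolInvocation can be run through the executor directly, without the graph,
# to confirm the tools are wired up correctly.
# probe = ToolInvocation(tool=shell_tool.name, tool_input={"commands": ["echo hello"]})
# print(tool_executor.invoke(probe))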
# Define the function that determines whether to continue or not
def should_continue(state):
    messages = state['messages']
    last_message = messages[-1]
    # If there is no function call, then we finish
    if "function_call" not in last_message.additional_kwargs:
        return "end"
    # Otherwise if there is, we continue
    else:
        return "continue"
# Define the function that calls the model
def call_model(state):
    messages = state['messages']
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}
# Define the function to execute tools
def call_tool(state):
    messages = state['messages']
    # Based on the continue condition
    # we know the last message involves a function call
    last_message = messages[-1]
    # We construct a ToolInvocation from the function_call
    action = ToolInvocation(
        tool=last_message.additional_kwargs["function_call"]["name"],
        tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
    )
    # We call the tool_executor and get back a response
    response = tool_executor.invoke(action)
    # We use the response to create a FunctionMessage
    function_message = FunctionMessage(content=str(response), name=action.tool)
    # We return a list, because this will get added to the existing list
    return {"messages": [function_message]}
from langgraph.graph import StateGraph, END
# Define a new graph
workflow = StateGraph(AgentState)
# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", call_tool)
# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")
# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, then we call the `action` (tool) node.
        "continue": "action",
        # Otherwise we finish.
        "end": END
    }
)
# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, the `agent` node is called next.
workflow.add_edge('action', 'agent')
# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
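# Optional: stream intermediate results instead of waiting for the final state.
# A minimal sketch (the question is illustrative); because the compiled graph is
# a runnable, `stream` yields one update per executed node.
# for step in app.stream({"messages": [HumanMessage(content="What is LangGraph?")]}):
#     print(step)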
# inputs = {"messages": [HumanMessage(content="Check your cast command version")]}
# app.invoke(inputs)
def predict(question):
    que = {"messages": [HumanMessage(content=question)]}
    res = app.invoke(que)
    if res:
        return res["messages"][-1].content
    else:
        return "Sorry, something went wrong. Please contact me on WeChat: 13603634456"
gr.Interface(
    predict,
    inputs="textbox",
    outputs="textbox",
    title="Custom AI Expert BOT v0.1",
    description=(
        "This is a custom AI expert BOT: type in a question and the AI will answer it.\n"
        "Two example tools are currently available:\n"
        "1. A shell tool that turns natural language into bash commands and executes them.\n"
        "2. A search engine."
    ),
).launch()