##############################################################################
# Agent interfaces that bridge private capability agents (pandas,
# sql, ...), 3rd-party plugin agents (search, weather, movie, ...),
# and 3rd-party LLMs.
#
# @philmui
# Mon May 1 18:34:45 PDT 2023
##############################################################################
import logging

import numpy as np
import openai

from langchain.schema import HumanMessage
from langchain.prompts import PromptTemplate, ChatPromptTemplate, \
    HumanMessagePromptTemplate
# To parse outputs and get structured data back
from langchain.output_parsers import StructuredOutputParser, ResponseSchema

from models import load_chat_agent, load_chained_agent, load_sales_agent, \
    load_sqlite_agent, load_book_agent

logger = logging.getLogger(__name__)
instruct_template = """
Please answer this question clearly with easy-to-follow reasoning:
{query}
If you don't know the answer, just reply: not available.
"""

instruct_prompt = PromptTemplate(
    input_variables=["query"],
    template=instruct_template
)
response_schemas = [
    ResponseSchema(name="artist",
                   description="The name of the musical artist"),
    ResponseSchema(name="song",
                   description="The name of the song that the artist plays")
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
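# The generated format_instructions ask the LLM to reply with a markdown JSON
# code block of the form: {"artist": "...", "song": "..."}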
LOCAL_MAGIC_TOKENS = ["my company", "for us", "our company", "our sales"]
DIGITAL_MAGIC_TOKENS = ["digital media", "our database", "our digital"]
def is_magic(sentence, magic_tokens):
    """True if the sentence mentions any of the given routing keywords."""
    return any(t in sentence.lower() for t in magic_tokens)
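# e.g. is_magic("How are our sales doing?", LOCAL_MAGIC_TOKENS) -> True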
chat_prompt = ChatPromptTemplate(
    messages=[
        HumanMessagePromptTemplate.from_template(
            "Given a command from the user, extract the artist and "
            "song names\n{format_instructions}\n{user_prompt}")
    ],
    input_variables=["user_prompt"],
    partial_variables={"format_instructions": format_instructions}
)
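# Illustrative sketch (chat_prompt is not used elsewhere in this module;
# `chat_model` below is an assumed stand-in for any LangChain chat model):
#
#   _input = chat_prompt.format_prompt(user_prompt="Play Hey Jude by the Beatles")
#   _reply = chat_model(_input.to_messages())
#   artist_song = output_parser.parse(_reply.content)  # {"artist": ..., "song": ...}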
def chatAgent(chat_message):
    """Free-form conversation through the chat agent."""
    try:
        agent = load_chat_agent(verbose=True)
        output = agent([HumanMessage(content=chat_message)])
    except Exception as e:
        logger.error(e)
        output = "Please rephrase and try chat again."
    return output
def salesAgent(instruction):
    """Answer questions about local company sales data via the pandas agent."""
    output = ""
    try:
        agent = load_sales_agent(verbose=True)
        output = agent.run(instruction)
        print("panda> " + output)
    except Exception as e:
        logger.error(e)
        output = f"Rephrasing your prompt could get better sales results: {e}"
    return output
def chinookAgent(instruction, model_name):
    """Answer questions against the Chinook digital-media SQLite database."""
    output = ""
    try:
        agent = load_sqlite_agent(model_name)
        output = agent.run(instruction)
        print("chinook> " + output)
    except Exception as e:
        logger.error(e)
        output = f"Rephrasing your prompt could get better db results: {e}"
    return output
def semantically_similar(string1, string2):
    """Crude semantic match using OpenAI embeddings.

    The proper way to do this at scale is to use a vector DB
    (chroma, pinecone, ...).
    """
    response = openai.Embedding.create(
        input=[string1, string2],
        engine="text-similarity-davinci-001"
    )
    embedding_a = response['data'][0]['embedding']
    embedding_b = response['data'][1]['embedding']
    # OpenAI embeddings are unit-length, so the dot product is the
    # cosine similarity.
    similarity_score = np.dot(embedding_a, embedding_b)
    logger.info(f"similarity: {similarity_score}")
    return similarity_score > 0.8
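# e.g. semantically_similar("how do generals wage war?", "fight a war") would
# typically clear the 0.8 threshold and route to bookAgent below.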
def bookAgent(query):
    """Retrieval-style Q&A over the ingested book content."""
    output = ""
    try:
        agent = load_book_agent(verbose=True)
        result = agent({
            "query": query
        })
        logger.info(f"book response: {result['result']}")
        output = result['result']
    except Exception as e:
        logger.error(e)
        output = f"Rephrasing your prompt could get better book results: {e}"
    return output
def agentController(question_text, model_name):
    """Route a question to the best agent: deterministic keyword match first,
    then semantic similarity, falling back to a general reasoning agent."""
    output = ""
    # deterministic
    if is_magic(question_text, LOCAL_MAGIC_TOKENS):
        output = salesAgent(question_text)
        print(f"🔹 salesAgent: {output}")
    elif is_magic(question_text, DIGITAL_MAGIC_TOKENS):
        output = chinookAgent(question_text, model_name)
        print(f"🔹 chinookAgent: {output}")
    # semantic similarity search
    elif semantically_similar(question_text, "fight a war"):
        output = bookAgent(question_text)
        print(f"🔹 bookAgent: {output}")
    elif semantically_similar(question_text, "how to govern"):
        # for illustration: use the same bookAgent -- should/could be something else
        output = bookAgent(question_text)
        print(f"🔹 bookAgent: {output}")
    else:  # reasoning agents
        try:
            instruction = instruct_prompt.format(query=question_text)
            logger.info(f"instruction: {instruction}")
            agent = load_chained_agent(verbose=True, model_name=model_name)
            response = agent([instruction])
            if response is None or "not available" in response["output"]:
                output = ""
            else:
                output = response['output']
                logger.info(f"🔹 Steps: {response['intermediate_steps']}")
        except Exception as e:
            output = "Most likely ran out of tokens ..."
            logger.error(e)
    return output
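
# Minimal smoke test -- a sketch that assumes OPENAI_API_KEY is set, that the
# loaders in models.py can construct their agents, and an example model name
# (the name is an assumption, not prescribed by this module).
if __name__ == "__main__":
    print(agentController("How are our sales doing this quarter?",
                          "gpt-3.5-turbo"))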