from datetime import datetime

import langchain.prompts as prompts
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)

summary_template = """Summarize and provide direct quotes from the text below to help answer a question.
Do not directly answer the question; instead, provide a summary and quotes in the context of the user's question.
Do not use outside sources.
Reply with "Not applicable" if the text is unrelated to the question.
Use 75 words or fewer.
Remember, if the user does not specify a language, reply in the language of the user's question.
{context_str}
User's question: {question}
Relevant Information Summary:"""
summary_prompt = prompts.PromptTemplate(
    input_variables=["question", "context_str"],
    template=summary_template,
)
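
# Illustrative usage (the values below are hypothetical, not part of this module):
#   summary_prompt.format(
#       question="What is the penalty for late submission?",
#       context_str="...retrieved policy text...",
#   )
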
qa_template = """Write an answer for the user's question below solely based on the provided context.
If the user does not specify how many words the answer should be, the length of the answer should be {length}.
If the context is irrelevant, reply "Your question falls outside the scope of University of Sydney policy, so I cannot answer".
For each sentence in your answer, indicate which sources most support it via valid citation markers at the end of sentences, like (Example2012).
Answer in an unbiased and professional tone.
Make it clear when something is your opinion.
Use Markdown for formatting code or text, and try to use direct quotes to support arguments.
Remember, if the user does not specify a language, answer in the language of the user's question.
Context:
{context_str}
User's question: {question}
Answer:
"""
qa_prompt = prompts.PromptTemplate(
    input_variables=["question", "context_str", "length"],
    template=qa_template,
)

# Used by GPCL.
qa_prompt_GPCL = prompts.PromptTemplate(
    input_variables=["question", "context_str"],
    template="You are an AI assistant providing helpful advice about University of Sydney policy. "
    "You are given the following extracted parts of a long document and a question. "
    "Provide a conversational answer based on the context provided. "
    "You should only provide hyperlinks that reference the context below. Do NOT make up hyperlinks. "
    'If you cannot find the answer in the context below, just say "Hmm, I am not sure. Could you please rephrase your question?" Do not try to make up an answer. '
    "If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.\n\n"
    "Question: {question}\n"
    "=========\n"
    "{context_str}\n"
    "=========\n"
    "Answer in Markdown:",
)

search_prompt = prompts.PromptTemplate(
    input_variables=["question"],
    template="We want to answer the following question: {question}\n"
    "Provide three different targeted keyword searches (one search per line) "
    "that will find papers that help answer the question. Do not use boolean operators. "
    "Recent years are 2021, 2022, 2023.\n\n"
    "1.",
)


def _get_datetime():
    # Return today's date as MM/DD/YYYY for the citation prompt's {date} variable.
    now = datetime.now()
    return now.strftime("%m/%d/%Y")


citation_prompt = prompts.PromptTemplate(
    input_variables=["text"],
    template="Provide a possible citation for the following text in MLA Format. Today's date is {date}\n"
    "{text}\n\n"
    "Citation:",
    # LangChain invokes callable partial variables at format time, so the date
    # reflects when the prompt is formatted, not when the module is imported.
    partial_variables={"date": _get_datetime},
)

system_template = """You are an AI chatbot with knowledge of the University of Sydney's legal policies that answers in an unbiased, professional tone.
You sometimes refuse to answer if there is insufficient information.
If the user does not specify a language, answer in the language of the user's question. """
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
human_summary_message_prompt = HumanMessagePromptTemplate.from_template(summary_template)
chat_summary_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_summary_message_prompt])
human_qa_message_prompt = HumanMessagePromptTemplate.from_template(qa_template)
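# Note: with the standard LangChain chat API, calling
# chat_summary_prompt.format_prompt(question=..., context_str=...).to_messages()
# yields a [SystemMessage, HumanMessage] pair ready to send to a chat model.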
# chat_qa_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_qa_message_prompt])  # TODO: remove
# human_condense_message_prompt = HumanMessagePromptTemplate.from_template(condense_template)
# chat_condense_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_condense_message_prompt])
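

if __name__ == "__main__":
    # Minimal smoke test (not part of the original module); the question and
    # context values below are illustrative placeholders only.
    demo_question = "What is the penalty for late submission?"
    demo_context = "Late work incurs a 5% deduction per calendar day."

    print(summary_prompt.format(question=demo_question, context_str=demo_context))
    print(qa_prompt.format(question=demo_question, context_str=demo_context, length="about 100 words"))
    print(citation_prompt.format(text=demo_context))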