""" | |
This file implements prompt template for llama based models. | |
Modify the prompt template based on the model you select. | |
This seems to have significant impact on the output of the LLM. | |
""" | |
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# This prompt is specific to Llama-2; earlier drafts are kept below for reference.
# system_prompt = """You are a helpful assistant, you will use the context and documents provided in the training to answer users' questions.
# Read the context provided before answering questions and think step by step. If you can't answer a user's question based on the
# context provided, inform the user. Don't use any other information to answer the user."""
# system_prompt = """You are a helpful assistant, and you will use the context and documents provided in the training to answer users' questions. Please read the context provided carefully before responding to questions and follow a step-by-step thought process. If you cannot answer a user's question based on the provided context, please inform the user. Do not use any other information to answer the user. Provide a detailed response based on the content of locally trained documents."""
system_prompt = """You are a helpful assistant that will use the context and documents provided in the training to answer users' questions.
Read the context provided before answering the questions and think step by step. Your answer cannot be more than 2000 words long.
If you can't answer, just say "I don't know" and don't try to make up an answer for the user."""


def get_prompt_template(system_prompt=system_prompt, promptTemplate_type=None, history=False):
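    """Build a PromptTemplate (and matching conversation memory) for the chosen model.

    promptTemplate_type selects model-specific instruction tokens ("llama" or
    "mistral"); any other value falls back to a plain Context/User/Answer layout.
    When history is True, the template also expects a {history} variable.
    """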
    if promptTemplate_type == "llama":
        # Llama-2 chat models expect the system prompt wrapped in <<SYS>> tags
        # inside a single [INST] ... [/INST] instruction block.
        B_INST, E_INST = "[INST]", "[/INST]"
        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
        SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
        if history:
            instruction = """
            Context: {history} \n {context}
            User: {question}"""
            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            instruction = """
            Context: {context}
            User: {question}"""
            prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
    elif promptTemplate_type == "mistral":
        # Mistral-instruct models use bare [INST] ... [/INST] tags with the
        # <s> beginning-of-sequence token, and no <<SYS>> section.
        B_INST, E_INST = "<s>[INST] ", " [/INST]"
        if history:
            prompt_template = (
                B_INST
                + system_prompt
                + """
            Context: {history} \n {context}
            User: {question}"""
                + E_INST
            )
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            prompt_template = (
                B_INST
                + system_prompt
                + """
            Context: {context}
            User: {question}"""
                + E_INST
            )
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
    else:
        # Generic fallback; change this based on the model you have selected.
        if history:
            prompt_template = (
                system_prompt
                + """
            Context: {history} \n {context}
            User: {question}
            Answer:"""
            )
            prompt = PromptTemplate(input_variables=["history", "context", "question"], template=prompt_template)
        else:
            prompt_template = (
                system_prompt
                + """
            Context: {context}
            User: {question}
            Answer:"""
            )
            prompt = PromptTemplate(input_variables=["context", "question"], template=prompt_template)
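
    # Buffer memory records each question/answer turn under the "history" key,
    # which the history-aware templates above interpolate on the next call.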
    memory = ConversationBufferMemory(input_key="question", memory_key="history")

    return prompt, memory
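

# A minimal usage sketch (illustration only, not part of the original module):
# build the Llama-2 template with history enabled and render it with sample
# values. The history/context/question strings below are made-up placeholders.
if __name__ == "__main__":
    prompt, memory = get_prompt_template(promptTemplate_type="llama", history=True)
    # PromptTemplate.format substitutes the declared input_variables.
    print(
        prompt.format(
            history="User: What does the policy cover?\nAssistant: Hardware faults.",
            context="Refunds are issued within 30 days of purchase.",
            question="How long do refunds take?",
        )
    )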