Clement Vachet committed
Commit 58453b6 · 1 Parent(s): c8e3c55

Add system prompt for LLM chain

Files changed (1): app.py (+13 −1)
app.py CHANGED
@@ -9,6 +9,7 @@ from langchain_huggingface import HuggingFaceEmbeddings
 from langchain.chains import ConversationChain
 from langchain.memory import ConversationBufferMemory
 from langchain_huggingface import HuggingFaceEndpoint
+from langchain_core.prompts import PromptTemplate
 
 from pathlib import Path
 import chromadb
@@ -29,6 +30,16 @@ _ = load_dotenv()
 huggingfacehub_api_token = os.environ.get("HUGGINGFACE_API_KEY")
 
 
+# Add system template for RAG application
+prompt_template = """
+You are an assistant for question-answering tasks. Use the following pieces of context to answer the question at the end.
+If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer concise.
+Question: {question}
+Context: {context}
+Helpful Answer:
+"""
+
+
 # default_persist_directory = './chroma_HF/'
 list_llm = ["mistralai/Mistral-7B-Instruct-v0.3", "microsoft/Phi-3.5-mini-instruct", \
     "meta-llama/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-1B-Instruct", "meta-llama/Meta-Llama-3-8B-Instruct", \
@@ -106,12 +117,13 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
     # retriever=vector_db.as_retriever(search_type="similarity", search_kwargs={'k': 3})
     retriever=vector_db.as_retriever()
     progress(0.8, desc="Defining retrieval chain...")
+    rag_prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
     qa_chain = ConversationalRetrievalChain.from_llm(
         llm,
         retriever=retriever,
         chain_type="stuff",
         memory=memory,
-        # combine_docs_chain_kwargs={"prompt": rag_prompt},
+        combine_docs_chain_kwargs={"prompt": rag_prompt},
         return_source_documents=True,
         #return_generated_question=False,
         verbose=False,
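
For context, the sketch below shows end to end how the prompt added in this commit is consumed: combine_docs_chain_kwargs={"prompt": rag_prompt} hands the template to the chain's internal "stuff" documents chain, which fills {context} with the retrieved chunks and {question} with the user's (possibly condensed) question. This is a minimal standalone sketch, not app.py itself: the toy corpus, endpoint parameters, and sample question are illustrative assumptions.

# Minimal sketch of the pattern this commit wires up; the corpus, model
# parameters, and question below are illustrative assumptions, not app.py values.
import os

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.vectorstores import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEmbeddings, HuggingFaceEndpoint

# Same template as in the diff above.
prompt_template = """
You are an assistant for question-answering tasks. Use the following pieces of context to answer the question at the end.
If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer concise.
Question: {question}
Context: {context}
Helpful Answer:
"""

# Toy in-memory vector store standing in for the app's document index.
vector_db = Chroma.from_texts(
    ["LangChain chains accept a custom QA prompt via combine_docs_chain_kwargs."],
    embedding=HuggingFaceEmbeddings(),
)

llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",  # first entry of list_llm
    temperature=0.5,
    max_new_tokens=512,
    huggingfacehub_api_token=os.environ.get("HUGGINGFACE_API_KEY"),
)

# output_key="answer" tells the memory which output to store, since
# return_source_documents=True makes the chain return multiple keys.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    output_key="answer",
    return_messages=True,
)

rag_prompt = PromptTemplate(
    template=prompt_template,
    input_variables=["context", "question"],
)
qa_chain = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=vector_db.as_retriever(),
    chain_type="stuff",
    memory=memory,
    combine_docs_chain_kwargs={"prompt": rag_prompt},  # the override added in this commit
    return_source_documents=True,
)

result = qa_chain.invoke({"question": "How do I pass a custom prompt to the chain?"})
print(result["answer"])

Routing the prompt through combine_docs_chain_kwargs customizes only the final answer-synthesis step; the chain's question-condensing step keeps its default prompt. The rendered template can also be previewed directly with rag_prompt.format(context="...", question="...").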