Daniel Marques committed on
Commit fe4b5f1
1 Parent(s): 6229292

feat: add prompt template

Files changed (2)
  1. main.py +11 -5
  2. static/index.html +1 -1
main.py CHANGED

@@ -13,6 +13,7 @@ import subprocess
 import torch
 from langchain.chains import RetrievalQA
 from langchain.embeddings import HuggingFaceInstructEmbeddings
+from langchain.prompts import PromptTemplate

 # from langchain.embeddings import HuggingFaceEmbeddings
 from run_localGPT import load_model
@@ -49,20 +50,25 @@ RETRIEVER = DB.as_retriever()
 LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
 prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)

+template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
+{context}
+Question: {question}
+Helpful Answer:"""
+
+QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
+
 QA = RetrievalQA.from_chain_type(
     llm=LLM,
     chain_type="stuff",
     retriever=RETRIEVER,
     return_source_documents=SHOW_SOURCES,
     chain_type_kwargs={
-        "prompt": prompt,
+        "prompt": QA_CHAIN_PROMPT,
     },
 )

-system_message = """
-you are a helpful, respectful and honest assistant. you should only respond to the following topics: water, climate, global warming, NASA data and geography. Always answer in the most helpful and safe way possible. Your answers should not include harmful, unethical, racist, sexist, toxic, dangerous or illegal content. Make sure that your answers are socially unbiased and positive in nature, as well as sticking to the topics of water, climate, global warming, NASA data and geography.
-If a question doesn't make sense or isn't factually coherent, explain that only questions on the topics of water, climate, global warming, NASA data and geography are accepted. If you don't know the answer to a question, don't share false information.
-"""
+
+QA_CHAIN_PROMPT = PromptTemplate.from_template(template)

 class Predict(BaseModel):
     prompt: str
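For context, a minimal sketch of how the rewired chain would be queried after this change. The example query string and result handling below are illustrative assumptions, not part of the commit; RetrievalQA fills the template's {context} placeholder with the retrieved documents and {question} with the incoming query.

# Hypothetical usage of the QA chain built above (not part of the commit).
# With return_source_documents enabled, calling the chain returns a dict
# containing both the generated answer and the retrieved documents.
res = QA("How does global warming affect sea levels?")  # example query (assumption)
answer = res["result"]                 # answer text generated with QA_CHAIN_PROMPT
sources = res["source_documents"]      # included when SHOW_SOURCES is truthy
print(answer)
for doc in sources:
    print(doc.metadata.get("source"))  # source path of each retrieved chunk, if recorded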
static/index.html CHANGED

@@ -240,7 +240,7 @@
 </style>
 <body>
   <div class="container">
-    <div class="titlegpt">LocalGPT</div>
+    <div class="titlegpt">KARATA LLA</div>
   </div>
 </body>
 </html>