Update app.py
Browse files
app.py
CHANGED
@@ -19,11 +19,11 @@ memory = ConversationBufferMemory(memory_key="chat_history")
|
|
19 |
|
20 |
persist_directory="db"
|
21 |
llm=OpenAI(model_name = "text-davinci-003", temperature=0)
|
22 |
-
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
|
23 |
model_name = "hkunlp/instructor-large"
|
24 |
embed_instruction = "Represent the text from the BMW website for retrieval"
|
25 |
query_instruction = "Query the most relevant text from the BMW website"
|
26 |
embeddings = HuggingFaceInstructEmbeddings(model_name=model_name, embed_instruction=embed_instruction, query_instruction=query_instruction)
|
|
|
27 |
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=db.as_retriever(), memory=memory)
|
28 |
|
29 |
def chat(message, history):
|
|
|
# Retrieval-QA setup (post-change pane of the diff, reconstructed as clean code).
# NOTE(review): `memory`, OpenAI, Chroma, HuggingFaceInstructEmbeddings and
# RetrievalQAWithSourcesChain are bound earlier in app.py (the hunk header shows
# `memory = ConversationBufferMemory(memory_key="chat_history")`) — confirm.

persist_directory = "db"  # on-disk location of the persisted Chroma store
# temperature=0 for deterministic answers.
# NOTE(review): "text-davinci-003" is a retired OpenAI completions model —
# consider migrating to a current model.
llm = OpenAI(model_name="text-davinci-003", temperature=0)

# Instructor embeddings with task-specific instructions for indexing vs. querying.
model_name = "hkunlp/instructor-large"
embed_instruction = "Represent the text from the BMW website for retrieval"
query_instruction = "Query the most relevant text from the BMW website"
embeddings = HuggingFaceInstructEmbeddings(
    model_name=model_name,
    embed_instruction=embed_instruction,
    query_instruction=query_instruction,
)

# The commit correctly moved this below the `embeddings` definition and renamed
# the keyword argument target from the undefined `embedding` to `embeddings`.
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

# BUG FIX: both sides of the diff call `db.as_retriever()`, but `db` is never
# defined — the vector store variable is `vectordb`. As committed this line
# raises NameError at import time.
chain = RetrievalQAWithSourcesChain.from_chain_type(
    llm,
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
    memory=memory,
)
|
28 |
|
29 |
def chat(message, history):
|