Update functions.py
functions.py CHANGED (+1 −3)
@@ -336,8 +336,6 @@ def chunk_and_preprocess_text(text, model_name= 'philschmid/flan-t5-base-samsum'
 
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     sentences = sent_tokenize(text)
-
-    print("sentences: {sentences}")
 
     # initialize
     length = 0
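The first hunk drops a leftover debug print from chunk_and_preprocess_text (note it lacked the f-string prefix, so it would have printed the literal braces rather than the sentences). For context, the function splits the input into sentences with NLTK's sent_tokenize and, judging from the length = 0 initializer that follows, packs them into token-bounded chunks for the summarization model. A minimal sketch of that pattern follows; the max_tokens budget and the packing loop are illustrative assumptions, since the actual loop sits outside this diff:

from nltk.tokenize import sent_tokenize
from transformers import AutoTokenizer

def chunk_sentences(text, model_name='philschmid/flan-t5-base-samsum', max_tokens=512):
    """Illustrative sketch only: pack whole sentences into chunks that
    stay under a token budget. The budget and loop are assumptions,
    not the repository's exact implementation."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    sentences = sent_tokenize(text)  # requires nltk.download('punkt')

    chunks, current, length = [], [], 0
    for sentence in sentences:
        n_tokens = len(tokenizer.tokenize(sentence))
        # start a new chunk when adding this sentence would exceed the budget
        if current and length + n_tokens > max_tokens:
            chunks.append(" ".join(current))
            current, length = [], 0
        current.append(sentence)
        length += n_tokens
    if current:
        chunks.append(" ".join(current))
    return chunks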
@@ -574,7 +572,7 @@ def embed_text(query,_docsearch):
 
     question_generator = LLMChain(llm=chat_llm, prompt=CONDENSE_QUESTION_PROMPT)
     doc_chain = load_qa_chain(llm=chat_llm,chain_type="stuff",prompt=load_prompt())
-    chain = ConversationalRetrievalChain(retriever=
+    chain = ConversationalRetrievalChain(retriever=_docsearch.as_retriever(search_kwags={"k": 3}),
                                          question_generator=question_generator,
                                          combine_docs_chain=doc_chain,
                                          memory=memory,
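The second hunk completes the ConversationalRetrievalChain construction in embed_text by passing the vector store's retriever with a top-k of 3. Note that LangChain's keyword is spelled search_kwargs; the committed search_kwags looks like a typo worth verifying against the retriever it constructs. Below is a self-contained sketch of the same wiring with the standard spelling; the ConversationBufferMemory setup is an assumption (the diff references memory but does not show its creation), and chat_llm and the repo's load_prompt are treated as given:

from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.memory import ConversationBufferMemory

def build_chain(chat_llm, _docsearch, load_prompt):
    # Assumed memory setup; the diff only shows `memory=memory`.
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    question_generator = LLMChain(llm=chat_llm, prompt=CONDENSE_QUESTION_PROMPT)
    doc_chain = load_qa_chain(llm=chat_llm, chain_type="stuff", prompt=load_prompt())
    return ConversationalRetrievalChain(
        # Standard spelling is search_kwargs; the commit writes search_kwags.
        retriever=_docsearch.as_retriever(search_kwargs={"k": 3}),  # top-3 documents
        question_generator=question_generator,
        combine_docs_chain=doc_chain,
        memory=memory,
    )

With chain_type="stuff", the retrieved documents are concatenated into a single prompt, so the k=3 setting directly bounds how much retrieved context reaches the LLM.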