Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -544,8 +544,47 @@ memory.save_context(
|
|
544 |
|
545 |
|
546 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
547 |
|
548 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
549 |
|
550 |
|
551 |
|
|
|
544 |
|
545 |
|
546 |
|
547 |
+
# Demo run: send a fixed list of questions through the Gemini-backed
# ConversationalRetrievalChain and print each answer.
# NOTE(review): reconstructed from a diff view of app.py; the helpers
# `retrieval_blocks`, `custom_ConversationalRetrievalChain`,
# `instantiate_LLM` and the keys `cohere_api_key` / `google_api_key`
# are defined elsewhere in the file — confirm against the full source.
# NOTE(review): the question strings contain typos ("plaese",
# "stands for") — they are prompt data sent to the LLM, left as-is here.
questions = ["what does DTC stands for?",
    "plaese give more details about it, including its use cases and implementation.",
    "does it outperform other diffusion-based models? explain in details.",
    "what is Langchain?"]

# Instantiate the retriever and the ConversationalRetrievalChain :

# Retriever over a pre-built vectorstore (create_vectorstore=False reuses
# "Vit_All_Google_Embeddings") using Google embeddings; the base similarity
# retriever's top-12 hits are reranked by Cohere down to top-10.
retriever_Google = retrieval_blocks(
    create_vectorstore=False,
    LLM_service="Google",
    vectorstore_name="Vit_All_Google_Embeddings",
    retriever_type="Cohere_reranker",
    base_retriever_search_type="similarity", base_retriever_k=12,
    compression_retriever_k=16,
    cohere_api_key=cohere_api_key,cohere_top_n=10,
)

# Conversational chain: one gemini-pro LLM (temperature 0.5) answers,
# a second lower-temperature (0.1) gemini-pro condenses follow-up
# questions into standalone ones. Returns the chain and its memory.
chain_gemini,memory_gemini = custom_ConversationalRetrievalChain(
    llm = instantiate_LLM(
        LLM_provider="Google",api_key=google_api_key,temperature=0.5,model_name="gemini-pro"
    ),
    condense_question_llm = instantiate_LLM(
        LLM_provider="Google",api_key=google_api_key,temperature=0.1,model_name="gemini-pro"),
    retriever=retriever_Google,
    language="english",
    llm_provider="Google",
    model_name="gemini-pro"
)

# Start from an empty conversation history so earlier runs don't leak in.
memory_gemini.clear()

for i,question in enumerate(questions):
    # Invoke the chain; the response is expected to carry 'answer'
    # (a message object with .content) and 'standalone_question'.
    # NOTE(review): assumes the custom chain's output schema — verify.
    response = chain_gemini.invoke({"question":question})
    answer = response['answer'].content
    print(f"Question[{i}]:",question)
    print("Standalone_question:",response['standalone_question'])
    print("Answer:\n",answer,f"\n\n{'-' * 100}\n")

    memory_gemini.save_context( {"question": question}, {"answer": answer} ) # update memory
|
587 |
+
|
588 |
|
589 |
|
590 |
|