Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -576,8 +576,8 @@ def get_environment_variable(key):
|
|
576 |
|
577 |
|
578 |
|
579 |
-
|
580 |
-
def create_memory(model_name='gpt-3.5-turbo',memory_max_token=None):
|
581 |
"""Creates a ConversationSummaryBufferMemory for gpt-3.5-turbo.
|
582 |
Creates a ConversationBufferMemory for the other models."""
|
583 |
|
@@ -603,7 +603,8 @@ def create_memory(model_name='gpt-3.5-turbo',memory_max_token=None):
|
|
603 |
|
604 |
# Set a small memory_max_token, just to show how older messages are summarized if max_token_limit is exceeded.
|
605 |
|
606 |
-
memory = create_memory(model_name='gpt-3.5-turbo',memory_max_token=20)
|
|
|
607 |
|
608 |
# save context
|
609 |
memory.save_context(
|
@@ -734,7 +735,8 @@ def create_ConversationalRetrievalChain(
|
|
734 |
retriever,
|
735 |
chain_type= 'stuff',
|
736 |
language="english",
|
737 |
-
model_name='gpt-3.5-turbo'
|
|
|
738 |
):
|
739 |
"""Create a ConversationalRetrievalChain.
|
740 |
First, it passes the follow-up question along with the chat history to an LLM which rephrases
|
@@ -773,8 +775,10 @@ Standalone question:""")
|
|
773 |
|
774 |
memory=memory,
|
775 |
retriever = retriever,
|
776 |
-
llm=llm,
|
777 |
-
|
|
|
|
|
778 |
chain_type= chain_type,
|
779 |
verbose= False,
|
780 |
return_source_documents=True
|
|
|
576 |
|
577 |
|
578 |
|
579 |
+
def create_memory(model_name='gemini-pro',memory_max_token=None):
|
580 |
+
#def create_memory(model_name='gpt-3.5-turbo',memory_max_token=None):
|
581 |
"""Creates a ConversationSummaryBufferMemory for gpt-3.5-turbo.
|
582 |
Creates a ConversationBufferMemory for the other models."""
|
583 |
|
|
|
603 |
|
604 |
# Set a small memory_max_token, just to show how older messages are summarized if max_token_limit is exceeded.
|
605 |
|
606 |
+
memory = create_memory(model_name='gemini-pro',memory_max_token=None)
|
607 |
+
#memory = create_memory(model_name='gpt-3.5-turbo',memory_max_token=20)
|
608 |
|
609 |
# save context
|
610 |
memory.save_context(
|
|
|
735 |
retriever,
|
736 |
chain_type= 'stuff',
|
737 |
language="english",
|
738 |
+
model_name='gemini-pro'
|
739 |
+
#model_name='gpt-3.5-turbo'
|
740 |
):
|
741 |
"""Create a ConversationalRetrievalChain.
|
742 |
First, it passes the follow-up question along with the chat history to an LLM which rephrases
|
|
|
775 |
|
776 |
memory=memory,
|
777 |
retriever = retriever,
|
778 |
+
#llm=llm,
|
779 |
+
llm=instantiate_LLM(
|
780 |
+
LLM_provider="Google",api_key=google_api_key,temperature=0.5,
|
781 |
+
model_name="gemini-pro"),
|
782 |
chain_type= chain_type,
|
783 |
verbose= False,
|
784 |
return_source_documents=True
|