Update app.py
app.py CHANGED

@@ -709,7 +709,7 @@ chain = ConversationalRetrievalChain.from_llm(
 )
 
 
-
+"""
 # let's invoke the chain
 response = chain.invoke({"question":"what does Google stand for?"})
 print(response['answer'])
@@ -720,7 +720,7 @@ chain.memory.load_memory_variables({})
 follow_up_question = "plaese give more details about it, including its use cases and implementation."
 
 chain.invoke({"question":follow_up_question})['answer']
-
+"""
 
 from langchain.chains import ConversationalRetrievalChain
 
@@ -781,8 +781,19 @@ Standalone question:""")
 
 
 
+"""
+# let's invoke the chain
+response = chain.invoke({"question":"what does Google stand for?"})
+print(response['answer'])
 
 
+chain.memory.load_memory_variables({})
+
+follow_up_question = "plaese give more details about it, including its use cases and implementation."
+
+chain.invoke({"question":follow_up_question})['answer']
+"""
+
 def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
 
 
@@ -796,7 +807,7 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
     completion = chain.invoke({"question":prompt})
     #chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff")
     #completion = chain.run(input_documents=docs, question=query)
-
+
 
 
     get_empty_state()
@@ -805,12 +816,13 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
 
     completion = { "content": completion }
     print("completion2")
-    print(completion)
+    print(completion['content']['answer'])
+    print(completion['content']['answer'])
 
 
 
     #chat_messages = [(prompt_msg['content'], completion['content'])]
-    chat_messages = [(prompt, completion)]
+    chat_messages = [(prompt, completion['content']['answer'])]
     return '', chat_messages, state # total_tokens_used_msg,
 
 
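The paired """ lines in the first three hunks turn the module-level demo calls (the "what does Google stand for?" question and its follow-up) into a bare string literal, so Python evaluates and discards them instead of running them when the Space imports app.py. A sketch of a more conventional guard that keeps the demo runnable from the command line, assuming `chain` is the ConversationalRetrievalChain built earlier in app.py (this uses an `if __name__ == "__main__":` guard, which is not what this commit does):

if __name__ == "__main__":
    # Demo only runs when app.py is executed directly, not on import.
    # `chain` is assumed to be the ConversationalRetrievalChain defined above.
    response = chain.invoke({"question": "what does Google stand for?"})
    print(response["answer"])

    # Inspect the conversation memory accumulated so far.
    chain.memory.load_memory_variables({})

    follow_up_question = ("please give more details about it, "
                          "including its use cases and implementation.")
    print(chain.invoke({"question": follow_up_question})["answer"])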
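For context on the submit_message hunks: ConversationalRetrievalChain.invoke returns a dict, not a string, so both the debug print and the Gradio chat history need the "answer" field rather than the whole object. A minimal sketch of the corrected handling, where `build_chat_messages` is a hypothetical helper name for illustration and `chain` is assumed to be the chain built earlier in app.py:

def build_chat_messages(chain, prompt):
    # ConversationalRetrievalChain.invoke returns a dict such as
    # {"question": ..., "chat_history": ..., "answer": ...}.
    response = chain.invoke({"question": prompt})

    # The commit wraps this as {"content": response} and then reads
    # completion['content']['answer']; either way, only the answer string
    # should reach the chat history, not the whole dict.
    answer = response["answer"]

    # Gradio's Chatbot component expects (user, assistant) string pairs.
    return [(prompt, answer)]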