Update app.py
app.py CHANGED
@@ -791,7 +791,7 @@ chain.memory.load_memory_variables({})
 
 follow_up_question = "plaese give more details about it, including its use cases and implementation."
 
-chain.invoke({"question":follow_up_question})['answer']
+chain.invoke({"question":follow_up_question})['answer'])
 """
 
 def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
@@ -799,15 +799,19 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
 
     history = state['messages']
 
-    if not prompt:
-        return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], state
+    #if not prompt:
+    #    return gr.update(value=''), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], state
 
     prompt_template = prompt_templates[prompt_template]
 
     completion = chain.invoke({"question":prompt})
+    print("completion")
+    print(completiion)
     #chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff")
     #completion = chain.run(input_documents=docs, question=query)
-
+
+
+    chain.memory.load_memory_variables({})
 
 
     get_empty_state()
@@ -815,11 +819,6 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_length, state):
     #state.append(completion.copy())
 
     completion = { "content": completion }
-    print("completion2")
-    print(completion['content']['answer'])
-    print(completion['content']['answer'])
-
-
 
     #chat_messages = [(prompt_msg['content'], completion['content'])]
     chat_messages = [(prompt, completion['content']['answer'])]