Update app.py
app.py CHANGED
@@ -778,24 +778,35 @@ def main():
 
     if next_action=='search':
         file_content_area = st.text_area("File Contents:", file_contents, height=500)
-        st.write('
+        st.write('🔍Running with Llama and GPT.')
 
         user_prompt = file_contents
 
         # Llama versus GPT Battle!
-
-
-
-
-
+        all=""
+        try:
+            st.write('🔍Running with Llama.')
+            response = StreamLLMChatResponse(file_contents)
+            filename = generate_filename(user_prompt, ".md")
+            create_file(filename, file_contents, response, should_save)
+            all=response
+            SpeechSynthesis(response)
+        except:
+            st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
 
         # gpt
-
-
-
-
+        try:
+            st.write('🔍Running with GPT.')
+            response2 = chat_with_model(user_prompt, file_contents, model_choice)
+            filename2 = generate_filename(file_contents, choice)
+            create_file(filename2, user_prompt, response, should_save)
+            all=all+response2
+            SpeechSynthesis(response2)
+        except:
+            st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
+
+        SpeechSynthesis(all)
 
-        #st.experimental_rerun()
 
 # Function to encode file to base64
 def get_base64_encoded_file(file_path):
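
The commit wraps each model call in its own try/except so one backend failing does not block the other, accumulates both responses, and speaks the combined result at the end. Below is a hedged sketch of the same flow as a standalone function, not the committed code itself: it assumes the helpers defined elsewhere in app.py (StreamLLMChatResponse, chat_with_model, generate_filename, create_file, SpeechSynthesis) and the globals model_choice, choice, and should_save. Two details in the committed hunk look like slips and are adjusted here: the second create_file call passes response (the Llama output) where response2 (the GPT output) appears intended, and the bare except: clauses would also swallow KeyboardInterrupt. The variable all is renamed to combined to avoid shadowing the built-in.

import streamlit as st

def run_llama_vs_gpt(file_contents: str, user_prompt: str) -> str:
    # Sketch only: StreamLLMChatResponse, chat_with_model, generate_filename,
    # create_file, SpeechSynthesis, model_choice, choice, and should_save are
    # assumed to be defined elsewhere in app.py, as in the commit.
    combined = ""  # renamed from `all` to avoid shadowing the built-in

    # Llama side: stream a response, save it to a markdown file, speak it.
    try:
        st.write('🔍Running with Llama.')
        response = StreamLLMChatResponse(file_contents)
        filename = generate_filename(user_prompt, ".md")
        create_file(filename, file_contents, response, should_save)
        combined = response
        SpeechSynthesis(response)
    except Exception:  # Exception rather than bare except, so Ctrl-C still works
        st.markdown('Llama is sleeping. Restart ETA 30 seconds.')

    # GPT side: same pattern with the chat model.
    try:
        st.write('🔍Running with GPT.')
        response2 = chat_with_model(user_prompt, file_contents, model_choice)
        filename2 = generate_filename(file_contents, choice)
        # The commit passes `response` here; `response2` looks intended.
        create_file(filename2, user_prompt, response2, should_save)
        combined = combined + response2
        SpeechSynthesis(response2)
    except Exception:
        st.markdown('GPT is sleeping. Restart ETA 30 seconds.')

    # Speak the concatenated Llama + GPT output once both attempts finish.
    SpeechSynthesis(combined)
    return combined

Because each branch catches its own failure and leaves combined in a usable state, the final SpeechSynthesis(combined) still runs with whichever responses succeeded, which matches the fallback behavior the commit's "sleeping" messages suggest.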