Update app.py
app.py (CHANGED)
```diff
@@ -639,16 +639,25 @@ Example: letter_queue = deque(random.sample(string.ascii_uppercase, 10))"""
     st.write('๐Run 1 is Complete.')
 
 
-    # -------------------------------
-
+    # ------------------------------- Land call to new guy here! --------------------------------------
     # New guys laughs
-
-
-
-
-
+    #arxivmain(query)
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    result = client.predict(
+        "What is Semantic and Episodic memory?",  # str in 'Search' Textbox component
+        4,  # float (numeric value between 4 and 10) in 'Top n results as context' Slider component
+        "Semantic Search - up to 10 Mar 2024",  # Literal['Semantic Search - up to 10 Mar 2024', 'Arxiv Search - Latest - (EXPERIMENTAL)'] in 'Search Source' Dropdown component
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",  # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
+        api_name="/update_with_rag_md"
+    )
+    st.markdown(result)
+    SpeechSynthesis(result)
     # ------------------------------------------------------------------------------------Thx----------
 
+    # ๐Run 1+N - plain query
+    response = chat_with_model45(result)  # experimental 45
+    all = query + ' ' + response
+    st.write('๐Run 1+N is Complete.')
 
 
 
```
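The new block in this hunk calls a hosted Gradio Space (`awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern`) to run an Arxiv RAG query, renders the markdown it returns, and reads it aloud. Note that the search string is hard-coded here even though the follow-up "Run 1+N" step concatenates the user's `query` with the response. For reference, here is a minimal standalone sketch of the same call with the search string parameterized; the function name `ask_arxiv` and the main guard are illustrative, not part of the commit, and it assumes `gradio_client` is installed and the Space is reachable:

```python
# Minimal sketch (not part of the commit): query the Arxiv RAG Space
# directly. Assumes `pip install gradio_client` and a live Space;
# `ask_arxiv` and its parameter names are illustrative.
from gradio_client import Client

def ask_arxiv(search: str, top_n: int = 4) -> str:
    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    # Positional args mirror the Space's UI components, in order:
    # Search textbox, Top-n slider, Search Source dropdown, LLM Model dropdown.
    return client.predict(
        search,
        top_n,
        "Semantic Search - up to 10 Mar 2024",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        api_name="/update_with_rag_md",
    )

if __name__ == "__main__":
    print(ask_arxiv("What is Semantic and Episodic memory?"))
```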
```diff
@@ -1114,6 +1123,38 @@ def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):
     st.write(time.time() - start_time)
     return full_reply_content
 
+# 11.1 45
+@st.cache_resource
+#def chat_with_model(prompt, document_section='', model_choice='gpt-3.5-turbo'):  # gpt-4-0125-preview gpt-3.5-turbo
+def chat_with_model45(prompt, document_section='', model_choice='gpt-4-0125-preview'):  # gpt-4-0125-preview gpt-3.5-turbo
+    model = model_choice
+    conversation = [{'role': 'system', 'content': 'You are a coder, inventor, and writer of quotes on wisdom as a helpful expert in all fields of health, math, development and AI using python.'}]
+    conversation.append({'role': 'user', 'content': prompt})
+    if len(document_section) > 0:
+        conversation.append({'role': 'assistant', 'content': document_section})
+    start_time = time.time()
+    report = []
+    res_box = st.empty()
+    collected_chunks = []
+    collected_messages = []
+
+    for chunk in openai.ChatCompletion.create(model=model_choice, messages=conversation, temperature=0.5, stream=True):
+        collected_chunks.append(chunk)
+        chunk_message = chunk['choices'][0]['delta']
+        collected_messages.append(chunk_message)
+        content = chunk["choices"][0].get("delta", {}).get("content")
+        try:
+            report.append(content)
+            if len(content) > 0:
+                result = "".join(report).strip()
+                res_box.markdown(f'*{result}*')
+        except:
+            st.write(' ')
+    full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
+    st.write("Elapsed time:")
+    st.write(time.time() - start_time)
+    return full_reply_content
+
 @st.cache_resource
 def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):  # gpt-4-0125-preview gpt-3.5-turbo
 #def chat_with_file_contents(prompt, file_content, model_choice='gpt-4-0125-preview'):  # gpt-4-0125-preview gpt-3.5-turbo
```
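`chat_with_model45` streams tokens through the legacy pre-1.0 `openai.ChatCompletion` API and repaints a single `st.empty()` placeholder as chunks arrive; the bare `except` exists because the final chunk's delta has no `content`, so `len(content)` would raise on `None`. If the app were moved to the openai>=1.0 SDK, the equivalent streaming loop would look roughly like the sketch below; `stream_chat` is an illustrative name, not part of the commit, and it assumes `OPENAI_API_KEY` is set in the environment:

```python
# Sketch (not part of the commit): the same incremental-rendering
# pattern on the openai>=1.0 SDK. Assumes OPENAI_API_KEY is set;
# `stream_chat` is an illustrative name.
import streamlit as st
from openai import OpenAI

def stream_chat(prompt: str, model: str = "gpt-4-0125-preview") -> str:
    client = OpenAI()
    res_box = st.empty()      # placeholder repainted as tokens arrive
    parts = []
    stream = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5,
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:             # the final chunk carries content=None
            parts.append(delta)
            res_box.markdown("".join(parts))
    return "".join(parts)
```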