Update app.py
app.py CHANGED
```diff
@@ -2,6 +2,7 @@ import streamlit as st
 import streamlit.components.v1 as components
 import huggingface_hub
 import gradio_client as gc
+
 import os
 import json
 import random
```
```diff
@@ -16,6 +17,7 @@ import textract
 import time
 import zipfile
 import dotenv
+
 from gradio_client import Client
 from audio_recorder_streamlit import audio_recorder
 from bs4 import BeautifulSoup
```
```diff
@@ -31,11 +33,26 @@ from xml.etree import ElementTree as ET
 from PIL import Image
 from urllib.parse import quote # Ensure this import is included
 
+
+## Show examples
+sample_outputs = {
+    'output_placeholder': 'The LLM will provide an answer to your question here...',
+    'search_placeholder': '1. What is MoE, Multi Agent Systems, Self Rewarding AI, Semantic and Episodic memory, What is AutoGen, ChatDev, Omniverse, Lumiere, SORA?'
+}
+
+def save_file(content, file_type):
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    file_name = f"{file_type}_{timestamp}.md"
+    with open(file_name, "w") as file:
+        file.write(content)
+    return file_name
+
 def load_file(file_name):
     with open(file_name, "r") as file:
         content = file.read()
     return content
 
+
 # HTML5 based Speech Synthesis (Text to Speech in Browser)
 @st.cache_resource
 def SpeechSynthesis(result):
```
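The new `save_file` writes timestamp-named markdown next to the existing `load_file`. A minimal round-trip sketch, assuming `from datetime import datetime` already appears in app.py's import block (it is not part of this hunk):

```python
from datetime import datetime

def save_file(content, file_type):
    # e.g. "Semantic_20240301_153045.md"
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    file_name = f"{file_type}_{timestamp}.md"
    with open(file_name, "w") as file:
        file.write(content)
    return file_name

def load_file(file_name):
    with open(file_name, "r") as file:
        return file.read()

# Round trip: save a result, then read it back unchanged.
name = save_file("# Notes\nMoE routes tokens to expert subnetworks.", "Semantic")
assert load_file(name) == "# Notes\nMoE routes tokens to expert subnetworks."
```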
```diff
@@ -67,24 +84,33 @@ def SpeechSynthesis(result):
     components.html(documentHTML5, width=1280, height=300)
 
 def parse_to_markdown(text):
-
-
-
-
-
-
-
+    # Split text into fields by | character
+    fields = text.split("|")
+
+    markdown = ""
+    for field in fields:
+        # Remove leading/trailing quotes and whitespace
+        field = field.strip(" '")
+
+        # Add field to markdown with whitespace separator
+        markdown += field + "\n\n"
+
     return markdown
 
 def search_arxiv(query):
+
     # Show ArXiv Scholarly Articles! ----------------*************-------------***************----------------------------------------
+    # st.title("▶️ Semantic and Episodic Memory System")
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+
     search_query = query
     #top_n_results = st.slider(key='topnresults', label="Top n results as context", min_value=4, max_value=100, value=100)
     #search_source = st.sidebar.selectbox(key='searchsource', label="Search Source", ["Semantic Search - up to 10 Mar 2024", "Arxiv Search - Latest - (EXPERIMENTAL)"])
     search_source = "Arxiv Search - Latest - (EXPERIMENTAL)" # "Semantic Search - up to 10 Mar 2024"
     #llm_model = st.sidebar.selectbox(key='llmmodel', label="LLM Model", ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it", "None"])
     llm_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+
     st.sidebar.markdown('### 🔎 ' + query)
     result = client.predict(
         search_query,
```
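`parse_to_markdown` now splits a pipe-delimited record, strips stray quotes and whitespace from each field, and joins the fields with blank lines. Its behavior on a typical record:

```python
def parse_to_markdown(text):
    fields = text.split("|")               # split on the | delimiter
    markdown = ""
    for field in fields:
        field = field.strip(" '")          # drop surrounding spaces and quotes
        markdown += field + "\n\n"         # blank line between fields
    return markdown

print(parse_to_markdown("'Attention Is All You Need' | 'Vaswani et al.' | '2017'"))
# Attention Is All You Need
#
# Vaswani et al.
#
# 2017
```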
```diff
@@ -101,8 +127,16 @@ def search_arxiv(query):
     SpeechSynthesis(result) # Search History Reader / Writer IO Memory - Audio at Same time as Reading.
     filename=generate_filename(result, "md")
     create_file(filename, query, result, should_save)
+
+
+    #file_type = st.radio("Select Which Type of Memory You Prefer:", ("Semantic", "Episodic"))
+    #if st.button("Save"):
+    #    file_name = save_file(result, file_type)
+    #    st.success(f"File saved: {file_name}")
+
     saved_files = [f for f in os.listdir(".") if f.endswith(".md")]
     selected_file = st.sidebar.selectbox("Saved Files", saved_files)
+
     if selected_file:
         file_content = load_file(selected_file)
         st.sidebar.markdown(file_content)
```
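`search_arxiv` drives the companion Gradio Space through `gradio_client`; the middle of the `client.predict(...)` call falls outside this diff, so only the call shape is sketched here. The `api_name` below is an assumption, not taken from the code; `client.view_api()` lists the Space's real endpoints:

```python
from gradio_client import Client

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

result = client.predict(
    "What is MoE?",                            # search_query
    "Arxiv Search - Latest - (EXPERIMENTAL)",  # search_source
    "mistralai/Mixtral-8x7B-Instruct-v0.1",    # llm_model
    api_name="/search",                        # hypothetical endpoint name
)
print(result)
```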
```diff
@@ -143,6 +177,7 @@ def display_glossary_grid(roleplaying_glossary):
         "📖": lambda k: f"https://huggingface.co/spaces/awacke1/World-Ship-Design?q={quote(k)}-{quote(PromptPrefix2)}", # this url plus query!
         "🔬": lambda k: f"https://huggingface.co/spaces/awacke1/World-Ship-Design?q={quote(k)}-{quote(PromptPrefix3)}", # this url plus query!
     }
+
     for category, details in roleplaying_glossary.items():
         st.write(f"### {category}")
         cols = st.columns(len(details)) # Create dynamic columns based on the number of games
```
```diff
@@ -152,6 +187,7 @@ def display_glossary_grid(roleplaying_glossary):
                 for term in terms:
                     gameterm = category + ' - ' + game + ' - ' + term
                     links_md = ' '.join([f"[{emoji}]({url(gameterm)})" for emoji, url in search_urls.items()])
+                    #links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
                     st.markdown(f"{term} {links_md}", unsafe_allow_html=True)
 
 def display_glossary_entity(k):
```
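`display_glossary_grid` and `display_glossary_entity` share the same trick: a dict mapping an emoji to a lambda that URL-encodes the term plus a prompt prefix into a Space query string, then one markdown link per emoji. Condensed below, with a placeholder prefix since the real `PromptPrefix*` values are defined elsewhere in app.py:

```python
from urllib.parse import quote

PromptPrefix2 = "Write a specification for: "  # placeholder value
base = "https://huggingface.co/spaces/awacke1/World-Ship-Design"

search_urls = {
    "📖": lambda k: f"{base}?q={quote(k)}-{quote(PromptPrefix2)}",
}

term = "MoE (Mixture of Experts)"
links_md = ' '.join(f"[{emoji}]({url(term)})" for emoji, url in search_urls.items())
print(links_md)  # [📖](https://huggingface.co/spaces/awacke1/World-Ship-Design?q=MoE%20%28Mixture%20of%20Experts%29-...)
```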
```diff
@@ -170,8 +206,11 @@ def display_glossary_entity(k):
     links_md = ' '.join([f"[{emoji}]({url(k)})" for emoji, url in search_urls.items()])
     st.markdown(f"{k} {links_md}", unsafe_allow_html=True)
 
+
+
+#st.markdown('''### 📚✨🔍 Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP ''')
+
 roleplaying_glossary = {
-
     "🤖 AI Concepts": {
         "MoE (Mixture of Experts) 🧠": [
             "What are Multi Agent Systems for Health",
```
```diff
@@ -202,7 +241,85 @@ roleplaying_glossary = {
             "Research in knowledge representation and retrieval"
         ]
     },
-
+    "🛠️ AI Tools & Platforms": {
+        "AutoGen 🔧": [
+            "Automated machine learning (AutoML) tool",
+            "Generates AI models based on requirements",
+            "Simplifies AI development process",
+            "Accessible to non-experts",
+            "Integration with various data sources"
+        ],
+        "ChatDev 💬": [
+            "Platform for building chatbots and conversational AI",
+            "Drag-and-drop interface for designing chat flows",
+            "Pre-built templates and integrations",
+            "Supports multiple messaging platforms",
+            "Analytics and performance tracking"
+        ],
+        "Omniverse 🌐": [
+            "Nvidia's 3D simulation and collaboration platform",
+            "Physically accurate virtual worlds",
+            "Supports AI training and testing",
+            "Used in industries like robotics, architecture, and gaming",
+            "Enables seamless collaboration and data exchange"
+        ],
+        "Lumiere 🎥": [
+            "AI-powered video analytics platform",
+            "Extracts insights and metadata from video content",
+            "Facial recognition and object detection",
+            "Sentiment analysis and scene understanding",
+            "Applications in security, media, and marketing"
+        ],
+        "SORA 🎞️": [
+            "Scalable Open Research Architecture",
+            "Framework for distributed AI research and development",
+            "Modular and extensible design",
+            "Facilitates collaboration and reproducibility",
+            "Supports various AI algorithms and models"
+        ]
+    },
+    "🌊 World Ship Design": {
+        "ShipHullGAN 🌊": [
+            "Generic parametric modeller for ship hull design",
+            "Uses deep convolutional generative adversarial networks (GANs)",
+            "Trained on diverse ship hull designs",
+            "Generates geometrically valid and feasible ship hull shapes",
+            "Enables exploration of traditional and novel designs",
+            "From the paper 'ShipHullGAN: A generic parametric modeller for ship hull design using deep convolutional generative model'"
+        ],
+        "B\'ezierGAN 📈": [
+            "Automatic generation of smooth curves",
+            "Maps low-dimensional parameters to B\'ezier curve points",
+            "Generates diverse and realistic curves",
+            "Preserves shape variation in latent space",
+            "Useful for design optimization and exploration",
+            "From the paper 'B\'ezierGAN: Automatic Generation of Smooth Curves from Interpretable Low-Dimensional Parameters'"
+        ],
+        "PlotMap 🗺️": [
+            "Automated game world layout design",
+            "Uses reinforcement learning to place plot elements",
+            "Considers spatial constraints from story",
+            "Enables procedural content generation for games",
+            "Handles multi-modal inputs (images, locations, text)",
+            "From the paper 'PlotMap: Automated Layout Design for Building Game Worlds'"
+        ],
+        "ShipGen ⚓": [
+            "Diffusion model for parametric ship hull generation",
+            "Considers multiple objectives and constraints",
+            "Generates tabular parametric design vectors",
+            "Uses classifier guidance to improve hull quality",
+            "Reduces design time and generates high-performing hulls",
+            "From the paper 'ShipGen: A Diffusion Model for Parametric Ship Hull Generation with Multiple Objectives and Constraints'"
+        ],
+        "Ship-D 📊": [
+            "Large dataset of ship hulls for machine learning",
+            "30,000 hulls with design and performance data",
+            "Includes parameterization, mesh, point cloud, images",
+            "Measures hydrodynamic drag under different conditions",
+            "Enables data-driven ship design optimization",
+            "From the paper 'Ship-D: Ship Hull Dataset for Design Optimization using Machine Learning'"
+        ]
+    },
     "🌌 Exploring the Universe":{
         "Cosmos 🪐": [
             "Object-centric world modeling framework",
```
```diff
@@ -266,6 +383,7 @@ def get_table_download_link(file_path):
     href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
     return href
 
+
 @st.cache_resource
 def create_zip_of_files(files): # ----------------------------------
     zip_name = "Arxiv-Paper-Search-QA-RAG-Streamlit-Gradio-AP.zip"
```
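`get_table_download_link` (and the zip variant below it) embed the file as a base64 `data:` URI inside an `<a download>` tag, which Streamlit then renders with `unsafe_allow_html=True`. The encoding step, whose tail is the `href` line shown above, follows the usual pattern; this is a sketch of that pattern, not the app's exact helper:

```python
import base64
import os

def make_download_link(file_path, mime_type="text/markdown"):
    # Read file bytes and embed them as a base64 data: URI in an anchor tag.
    with open(file_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    file_name = os.path.basename(file_path)
    return (f'<a href="data:{mime_type};base64,{b64}" '
            f'target="_blank" download="{file_name}">{file_name}</a>')
```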
```diff
@@ -284,6 +402,7 @@ def get_zip_download_link(zip_file):
 
 def FileSidebar():
     # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
+    # Compose a file sidebar of markdown md files:
     all_files = glob.glob("*.md")
     all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10] # exclude files with short names
     all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
```
```diff
@@ -319,16 +438,19 @@ def FileSidebar():
         if st.button("🗑", key="delete_"+file):
             os.remove(file)
             st.experimental_rerun()
+
 
     if len(file_contents) > 0:
         if next_action=='open':
             file_content_area = st.text_area("File Contents:", file_contents, height=500)
+            #try:
             if st.button("🔍", key="filecontentssearch"):
                 #search_glossary(file_content_area)
                 filesearch = PromptPrefix + file_content_area
                 st.markdown(filesearch)
                 if st.button(key=rerun, label='🔄Re-Spec' ):
                     search_glossary(filesearch)
+            #except:
             st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
 
         if next_action=='md':
```
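A Streamlit caveat in the `open` branch: the `🔄Re-Spec` button is nested inside the `🔍` button's `if` block, and since every click triggers a rerun that resets the outer button to `False`, the nested click can never be observed. A common fix, sketched here rather than taken from this commit, is to persist the outer click in session state:

```python
import streamlit as st

# Persist the outer click so the inner button survives the rerun.
if st.button("🔍", key="filecontentssearch"):
    st.session_state["show_respec"] = True

if st.session_state.get("show_respec"):
    if st.button("🔄Re-Spec", key="respec"):
        st.write("running search_glossary on the file contents...")  # stand-in
```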
```diff
@@ -336,21 +458,28 @@ def FileSidebar():
         buttonlabel = '🏃Run'
         if st.button(key='Runmd', label = buttonlabel):
             user_prompt = file_contents
+            #try:
             search_glossary(file_contents)
+            #except:
             st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
 
         if next_action=='search':
             file_content_area = st.text_area("File Contents:", file_contents, height=500)
             user_prompt = file_contents
+            #try:
+            #search_glossary(file_contents)
             filesearch = PromptPrefix2 + file_content_area
             st.markdown(filesearch)
             if st.button(key=rerun, label='🔄Re-Code' ):
                 search_glossary(filesearch)
-
+
+            #except:
             st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
 # ----------------------------------------------------- File Sidebar for Jump Gates ------------------------------------------
 FileSidebar()
 
+
+
 # ---- Art Card Sidebar with Random Selection of image:
 def get_image_as_base64(url):
     response = requests.get(url)
```
```diff
@@ -665,6 +794,8 @@ def clear_query_params():
 
 # My Inference API Copy
 API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
+# Meta's Original - Chat HF Free Version:
+#API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf"
 API_KEY = os.getenv('API_KEY')
 MODEL1="meta-llama/Llama-2-7b-chat-hf"
 MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf"
```
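`API_URL` targets a dedicated Hugging Face inference endpoint, with the token read from the `API_KEY` environment variable. A minimal request against such an endpoint usually looks like this; the payload fields follow the standard text-generation schema and are assumed, not shown in the diff:

```python
import os
import requests

API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'
API_KEY = os.getenv('API_KEY')

def query_llama(prompt: str) -> str:
    headers = {"Authorization": f"Bearer {API_KEY}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 512}}
    r = requests.post(API_URL, headers=headers, json=payload, timeout=120)
    r.raise_for_status()
    # Text-generation endpoints typically return [{"generated_text": "..."}].
    return r.json()[0]["generated_text"]
```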
```diff
@@ -678,6 +809,8 @@ prompt = "...."
 should_save = st.sidebar.checkbox("💾 Save", value=True, help="Save your session data.")
 
 
+
+
 # 3. Stream Llama Response
 # @st.cache_resource
 def StreamLLMChatResponse(prompt):
```
```diff
@@ -1120,11 +1253,28 @@ if 'action' in st.query_params:
     clear_query_params()
     st.experimental_rerun()
 
+# Handling repeated keys
+#if 'multi' in st.query_params:
+#    multi_values = get_all_query_params('multi')
+#    st.write("Values for 'multi':", multi_values)
+
+# Manual entry for demonstration
+#st.write("Enter query parameters in the URL like this: ?action=show_message&multi=1&multi=2")
+
 if 'query' in st.query_params:
     query = st.query_params['query'][0] # Get the query parameter
+    # Display content or image based on the query
     display_content_or_image(query)
 
+# Add a clear query parameters button for convenience
+#if st.button("Clear Query Parameters", key='ClearQueryParams'):
+    # This will clear the browser URL's query parameters
+    # st.experimental_set_query_params
+    # st.experimental_rerun()
+
+
 st.markdown("### 🎲🗺️ Arxiv Paper Search QA RAG MAS using Streamlit and Gradio API")
+
 filename = save_and_play_audio(audio_recorder)
 if filename is not None:
     transcription = transcribe_audio(filename)
```
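One thing to watch in this hunk: with the modern `st.query_params` API, values are plain strings, so `st.query_params['query'][0]` returns only the first character of the parameter; list-valued access belongs to the older `st.experimental_get_query_params()`. Both readings side by side:

```python
import streamlit as st

# Older API: values are lists, so [0] selects the first occurrence.
# query = st.experimental_get_query_params().get('query', [''])[0]

# Newer API (st.query_params): values are strings; no [0] needed.
query = st.query_params.get('query', '')
if query:
    st.write(f"Searching for: {query}")
```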
```diff
@@ -1158,6 +1308,8 @@ if filename is not None:
     os.remove(filename)
 
 
+
+
 prompt = '''
 What is MoE?
 What are Multi Agent Systems?
```
```diff
@@ -1203,6 +1355,10 @@ if openai.api_key == None: openai.api_key = st.secrets['OPENAI_API_KEY']
 menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
 choice = st.sidebar.selectbox("Output File Type:", menu)
 
+#model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
+#user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
+
+
 collength, colupload = st.columns([2,3]) # adjust the ratio as needed
 with collength:
     max_length = st.slider(key='maxlength', label="File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
```
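The `max_length` slider feeds `divide_prompt`, the chunking helper referenced in the commented-out chat block further down; that helper is not part of this diff. A plausible reconstruction with the same contract, splitting a long document into sections of at most `max_length` characters, is:

```python
def divide_prompt(text: str, max_length: int) -> list[str]:
    # Hypothetical sketch: pack whitespace-separated words into
    # sections no longer than max_length characters each.
    sections, current = [], ""
    for word in text.split():
        if len(current) + len(word) + 1 > max_length:
            sections.append(current)
            current = word
        else:
            current = f"{current} {word}".strip()
    if current:
        sections.append(current)
    return sections
```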
```diff
@@ -1235,6 +1391,20 @@ if len(document_sections) > 0:
     create_file(filename, user_prompt, response, should_save)
     st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
 
-
-
+#if st.button('💬 Chat'):
+#    st.write('Reasoning with your inputs...')
+#    user_prompt_sections = divide_prompt(user_prompt, max_length)
+#    full_response = ''
+#    for prompt_section in user_prompt_sections:
+#        response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+#        full_response += response + '\n' # Combine the responses
+#    response = full_response
+#    st.write('Response:')
+#    st.write(response)
+#    filename = generate_filename(user_prompt, choice)
+#    create_file(filename, user_prompt, response, should_save)
+
 display_images_and_wikipedia_summaries() # Image Jump Grid
+display_videos_and_links() # Video Jump Grid
+display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid
+#display_buttons_with_scores() # Feedback Jump Grid
```