awacke1 committed on
Commit 2fa5c4f • 1 Parent(s): d9d228b

Update app.py

Files changed (1)
  1. app.py +106 -111
app.py CHANGED
@@ -109,22 +109,34 @@ def search_arxiv(query):
     #llm_model = st.sidebar.selectbox(key='llmmodel', label="LLM Model", ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it", "None"])
     llm_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
-
     st.sidebar.markdown('### 🔎 ' + query)
-    result = client.predict(
-        search_query,
-        100,
-        search_source,
-        llm_model,
-        api_name="/update_with_rag_md"
     )
-    result = parse_to_markdown(result)
-
-    #st.markdown(result)
-    #arxiv_results = st.text_area("ArXiv Results: ", value=result, height=700)
-
-    result = str(result) # cast as string for these - check content length and format if encoding changes..
-    result=result.replace('\\n', ' ')
 
     SpeechSynthesis(result) # Search History Reader / Writer IO Memory - Audio at Same time as Reading.
 
@@ -262,85 +274,63 @@ roleplaying_glossary = {
             "Supports various AI algorithms and models"
         ]
     },
-    "🚀 World Ship Design": {
-        "ShipHullGAN 🌊": [
-            "Generic parametric modeller for ship hull design",
-            "Uses deep convolutional generative adversarial networks (GANs)",
-            "Trained on diverse ship hull designs",
-            "Generates geometrically valid and feasible ship hull shapes",
-            "Enables exploration of traditional and novel designs",
-            "From the paper 'ShipHullGAN: A generic parametric modeller for ship hull design using deep convolutional generative model'"
-        ],
-        "B\'ezierGAN 📏": [
-            "Automatic generation of smooth curves",
-            "Maps low-dimensional parameters to B\'ezier curve points",
-            "Generates diverse and realistic curves",
-            "Preserves shape variation in latent space",
-            "Useful for design optimization and exploration",
-            "From the paper 'B\'ezierGAN: Automatic Generation of Smooth Curves from Interpretable Low-Dimensional Parameters'"
-        ],
-        "PlotMap 🗺️": [
-            "Automated game world layout design",
-            "Uses reinforcement learning to place plot elements",
-            "Considers spatial constraints from story",
-            "Enables procedural content generation for games",
-            "Handles multi-modal inputs (images, locations, text)",
-            "From the paper 'PlotMap: Automated Layout Design for Building Game Worlds'"
         ],
-        "ShipGen ⚓": [
-            "Diffusion model for parametric ship hull generation",
-            "Considers multiple objectives and constraints",
-            "Generates tabular parametric design vectors",
-            "Uses classifier guidance to improve hull quality",
-            "Reduces design time and generates high-performing hulls",
-            "From the paper 'ShipGen: A Diffusion Model for Parametric Ship Hull Generation with Multiple Objectives and Constraints'"
-        ],
-        "Ship-D 📊": [
-            "Large dataset of ship hulls for machine learning",
-            "30,000 hulls with design and performance data",
-            "Includes parameterization, mesh, point cloud, images",
-            "Measures hydrodynamic drag under different conditions",
-            "Enables data-driven ship design optimization",
-            "From the paper 'Ship-D: Ship Hull Dataset for Design Optimization using Machine Learning'"
-        ]
-    },
-    "🌌 Exploring the Universe":{
-        "Cosmos 🪐": [
-            "Object-centric world modeling framework",
-            "Designed for compositional generalization",
-            "Uses neurosymbolic grounding",
-            "Neurosymbolic scene encodings and attention mechanism",
-            "Computes symbolic attributes using vision-language models",
-            "From the paper 'Neurosymbolic Grounding for Compositional World Models'"
         ],
-        "Active World Model Learning 🔭": [
-            "Curiosity-driven exploration for world model learning",
-            "Constructs agent to visually explore 3D environment",
-            "Uses progress-based curiosity signal ($\gamma$-Progress)",
-            "Overcomes 'white noise problem' in exploration",
-            "Outperforms baseline exploration strategies",
-            "From the paper 'Active World Model Learning with Progress Curiosity'"
         ],
-        "Probabilistic Worldbuilding 🎲": [
-            "Symbolic Bayesian model for semantic parsing and reasoning",
-            "Aims for general natural language understanding",
-            "Expresses meaning in human-readable formal language",
-            "Designed to generalize to new domains and tasks",
-            "Outperforms baselines on out-of-domain question answering",
-            "From the paper 'Towards General Natural Language Understanding with Probabilistic Worldbuilding'"
         ],
-        "Language-Guided World Models 💬": [
-            "Capture environment dynamics from language descriptions",
-            "Allow efficient communication and control",
-            "Enable self-learning from human instruction texts",
-            "Tested on challenging benchmark requiring generalization",
-            "Improves interpretability and safety via generated plans",
-            "From the paper 'Language-Guided World Models: A Model-Based Approach to AI Control'"
         ]
     }
 }
 
 @st.cache_resource
 def get_table_download_link(file_path):
 
@@ -571,13 +561,12 @@ image_urls = [
     "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/cmCZ5RTdSx3usMm7MwwWK.png",
 ]
 
-try:
-    selected_image_url = random.choice(image_urls)
-    selected_image_base64 = get_image_as_base64(selected_image_url)
-    if selected_image_base64 is not None:
-        with st.sidebar:
-            st.markdown(f"![image](data:image/png;base64,{selected_image_base64})")
-except:
     st.sidebar.write("Failed to load the image.")
 
 # Ensure the directory for storing scores exists
@@ -611,8 +600,10 @@ def load_score(key):
         return score_data["score"]
     return 0
 
 @st.cache_resource
-def search_glossary(query): # 🔍Run--------------------------------------------------------
     for category, terms in roleplaying_glossary.items():
         if query.lower() in (term.lower() for term in terms):
             st.markdown(f"#### {category}")
@@ -622,11 +613,21 @@ def search_glossary(query): # 🔍Run--------------------------------------------------------
     # 🔍Run 1 - plain query
     #response = chat_with_model(query)
     #response1 = chat_with_model45(query)
-
     #all = query + ' ' + response1
     #st.write('🔍Run 1 is Complete.')
 
-    # ArXiv searcher ~-<>-~
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     response1 = client.predict(
         query,
@@ -635,26 +636,23 @@
         "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
         api_name="/update_with_rag_md"
     )
-    st.write('🔍Run of Multi-Agent Systems is Complete')
-
-    # experimental 45 - - - - - - - - - - - - -<><><><><>
 
     RunPostArxivLLM = False
-
     if RunPostArxivLLM:
-
         # 🔍Run PaperSummarizer
         PaperSummarizer = ' Create a paper summary as a markdown table with paper links clustering the features writing short markdown emoji outlines to extract three main ideas from each of the ten summaries. For each one create three simple points led by an emoji of the main three steps needed as method step process for implementing the idea as a single app.py streamlit python app. '
-        # = str(result).replace('\n', ' ').replace('|', ' ')
-        # response2 = chat_with_model45(PaperSummarizer + str(response1))
         response2 = chat_with_model(PaperSummarizer + str(response1))
         st.write('🔍Run 3 - Paper Summarizer is Complete.')
 
         # 🔍Run AppSpecifier
-        AppSpecifier = ' Design and write a streamlit python code listing and specification that implements each scientific method steps as ten functions keeping specification in a markdown table in the function comments with original paper link to outline the AI pipeline ensemble implementing code as full plan to build.'
-        #result = str(result).replace('\n', ' ').replace('|', ' ')
-        # response3 = chat_with_model45(AppSpecifier + str(response2))
         response3 = chat_with_model(AppSpecifier + str(response2))
         st.write('🔍Run 4 - AppSpecifier is Complete.')
 
@@ -1377,7 +1375,6 @@ What is SORA?
 '''
 
 
-# Search History to ArXiv
 session_state = {}
 if "search_queries" not in session_state:
     session_state["search_queries"] = []
@@ -1388,12 +1385,10 @@ if example_input:
     # Search AI
     query=example_input
    if query:
-        try:
-            result = search_arxiv(query)
-            #search_glossary(query)
-            search_glossary(result)
-        except:
-            st.markdown(' ')
 
 st.write("Search history:")
 for example_input in session_state["search_queries"]:
 
     #llm_model = st.sidebar.selectbox(key='llmmodel', label="LLM Model", ["mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.2", "google/gemma-7b-it", "None"])
     llm_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
     st.sidebar.markdown('### 🔎 ' + query)
+
+
+
+    # ArXiv searcher ~-<>-~ Paper Summary - Ask LLM
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    response2 = client.predict(
+        query, # str in 'parameter_13' Textbox component
+        "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
+        True, # bool in 'Stream output' Checkbox component
+        api_name="/ask_llm"
     )
+    st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
+    st.markdown(response2)
+
+    # ArXiv searcher ~-<>-~ Paper References - Update with RAG
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    response1 = client.predict(
+        query,
+        10,
+        "Semantic Search - up to 10 Mar 2024", # Literal['Semantic Search - up to 10 Mar 2024', 'Arxiv Search - Latest - (EXPERIMENTAL)'] in 'Search Source' Dropdown component
+        "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
+        api_name="/update_with_rag_md"
+    )
+    st.write('🔍Run of Multi-Agent System Paper References is Complete')
+    responseall = response1[0] + response1[1]
+    st.markdown(responseall)
+    result = response2 + responseall
 
     SpeechSynthesis(result) # Search History Reader / Writer IO Memory - Audio at Same time as Reading.
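
The two added calls above use gradio_client to hit named endpoints on the same Hugging Face Space. A minimal sketch of how they could be wrapped into one helper follows; the function name `fetch_arxiv_rag`, the single shared `Client`, and the fallback string on failure are illustrative assumptions, not code from this commit.

```python
# Illustrative wrapper (not in the commit): call both endpoints with one Client.
from gradio_client import Client

def fetch_arxiv_rag(query,
                    space="awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern",
                    llm="mistralai/Mixtral-8x7B-Instruct-v0.1"):
    client = Client(space)  # reuse one handle for both named endpoints
    try:
        # Paper summary via /ask_llm (query, model, stream flag)
        summary = client.predict(query, llm, True, api_name="/ask_llm")
        # Paper references via /update_with_rag_md (query, top_k, source, model)
        refs = client.predict(query, 10, "Semantic Search - up to 10 Mar 2024",
                              llm, api_name="/update_with_rag_md")
        return summary + refs[0] + refs[1]
    except Exception as err:  # assumption: degrade to an error string on failure
        return f"ArXiv RAG call failed: {err}"
```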
 
             "Supports various AI algorithms and models"
         ]
     },
+    "🔬 Science Topics": {
+        "Physics 🔭": [
+            "Astrophysics: galaxies, cosmology, planets, high energy phenomena, instrumentation, solar/stellar",
+
+            "Condensed Matter: disordered systems, materials science, nano/mesoscale, quantum gases, soft matter, statistical mechanics, superconductivity",
+            "General Relativity and Quantum Cosmology",
+            "High Energy Physics: experiment, lattice, phenomenology, theory",
+            "Mathematical Physics",
+            "Nonlinear Sciences: adaptation, cellular automata, chaos, solvable systems, pattern formation",
+            "Nuclear: experiment, theory",
+            "Physics: accelerators, atmospherics, atomic/molecular, biophysics, chemical, computational, education, fluids, geophysics, optics, plasma, popular, space"
         ],
+        "Mathematics ➗": [
+            "Algebra: geometry, topology, number theory, combinatorics, representation theory",
+            "Analysis: PDEs, functional, numerical, spectral theory, ODEs, complex variables",
+            "Geometry: algebraic, differential, metric, symplectic, topological",
+            "Probability and Statistics",
+            "Applied Math: information theory, optimization and control"
         ],
+        "Computer Science 💻": [
+            "Artificial Intelligence and Machine Learning",
+
+            "Computation and Language, Complexity, Engineering, Finance, Science",
+            "Computer Vision, Graphics, Robotics",
+            "Cryptography, Security, Blockchain",
+            "Data Structures, Algorithms, Databases",
+            "Distributed and Parallel Computing",
+            "Formal Languages, Automata, Logic",
+            "Information Theory, Signal Processing",
+            "Networks, Internet Architecture, Social Networks",
+            "Programming Languages, Software Engineering"
         ],
+        "Quantitative Biology 🧬": [
+            "Biomolecules, Cell Behavior, Genomics",
+            "Molecular Networks, Neurons and Cognition",
+            "Populations, Evolution, Ecology",
+            "Quantitative Methods, Subcellular Processes",
+            "Tissues, Organs, Organisms"
         ],
 
+        "Quantitative Finance 📈": [
+            "Computational and Mathematical Finance",
+            "Econometrics and Statistical Finance",
+
+            "Economics, Portfolio Management, Trading",
+            "Pricing, Risk Management"
+        ],
+        "Electrical Engineering 🔌": [
+            "Audio, Speech, Image and Video Processing",
+            "Communications and Information Theory",
+            "Signal Processing, Controls, Robotics",
+            "Electronic Circuits, Embedded Systems"
         ]
     }
 }
 
+
 @st.cache_resource
 def get_table_download_link(file_path):
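
The glossary above is a plain nested dict of the form `{category: {term: [facts]}}`, which is what `search_glossary` iterates over later in this diff. A minimal lookup sketch under that assumption follows; the helper name and the substring-matching rule are illustrative (the app itself checks for an exact, case-insensitive term match).

```python
# Illustrative helper (not in app.py): substring lookup over the nested glossary.
def find_glossary_terms(glossary, query):
    """Return (category, term, facts) triples whose term name contains the query."""
    q = query.lower()
    hits = []
    for category, terms in glossary.items():      # e.g. "🔬 Science Topics"
        for term, facts in terms.items():         # e.g. "Physics 🔭": [...]
            if q in term.lower():
                hits.append((category, term, facts))
    return hits

# Example: find_glossary_terms(roleplaying_glossary, "physics")
```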
 
     "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/cmCZ5RTdSx3usMm7MwwWK.png",
 ]
 
+selected_image_url = random.choice(image_urls)
+selected_image_base64 = get_image_as_base64(selected_image_url)
+if selected_image_base64 is not None:
+    with st.sidebar:
+        st.markdown(f"![image](data:image/png;base64,{selected_image_base64})")
+else:
     st.sidebar.write("Failed to load the image.")
 
 # Ensure the directory for storing scores exists
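
`get_image_as_base64` is called above but its body is outside this diff. One plausible implementation that would satisfy the `is not None` check in the added lines is sketched below, assuming the helper fetches the URL over HTTP and base64-encodes the bytes.

```python
# Plausible sketch of get_image_as_base64 (an assumption; the real definition
# lives elsewhere in app.py): fetch the URL and return base64 text, or None.
import base64
import requests

def get_image_as_base64(url):
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
        return base64.b64encode(resp.content).decode("utf-8")
    except requests.RequestException:
        return None
```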
 
         return score_data["score"]
     return 0
 
+
+# 🔍Run--------------------------------------------------------
 @st.cache_resource
+def search_glossary(query):
     for category, terms in roleplaying_glossary.items():
         if query.lower() in (term.lower() for term in terms):
             st.markdown(f"#### {category}")
 
     # 🔍Run 1 - plain query
     #response = chat_with_model(query)
     #response1 = chat_with_model45(query)
     #all = query + ' ' + response1
     #st.write('🔍Run 1 is Complete.')
 
+    # ArXiv searcher ~-<>-~ Paper Summary - Ask LLM
+    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+    response2 = client.predict(
+        query, # str in 'parameter_13' Textbox component
+        "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
+        True, # bool in 'Stream output' Checkbox component
+        api_name="/ask_llm"
+    )
+    st.write('🔍Run of Multi-Agent System Paper Summary Spec is Complete')
+    #st.markdown(response2)
+
+    # ArXiv searcher ~-<>-~ Paper References - Update with RAG
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     response1 = client.predict(
         query,
 
         "mistralai/Mixtral-8x7B-Instruct-v0.1", # Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] in 'LLM Model' Dropdown component
         api_name="/update_with_rag_md"
     )
+    st.write('🔍Run of Multi-Agent System Paper References is Complete')
+    #st.markdown(response1)
 
+    responseall = response2 + response1[0] + response1[1]
+    st.markdown(responseall)
+    return responseall
 
+    # GPT 35 turbo and GPT 45 - - - - - - - - - - - - -<><><><><>:
     RunPostArxivLLM = False
     if RunPostArxivLLM:
         # 🔍Run PaperSummarizer
         PaperSummarizer = ' Create a paper summary as a markdown table with paper links clustering the features writing short markdown emoji outlines to extract three main ideas from each of the ten summaries. For each one create three simple points led by an emoji of the main three steps needed as method step process for implementing the idea as a single app.py streamlit python app. '
         response2 = chat_with_model(PaperSummarizer + str(response1))
         st.write('🔍Run 3 - Paper Summarizer is Complete.')
 
         # 🔍Run AppSpecifier
+        AppSpecifier = ' Design and write a streamlit python code listing and specification that implements each scientific method steps as ten functions keeping specification in a markdown table in the function comments with original paper link to outline the AI pipeline ensemble implementing code as full plan to build.'
         response3 = chat_with_model(AppSpecifier + str(response2))
         st.write('🔍Run 4 - AppSpecifier is Complete.')
 
 '''
 
 
 session_state = {}
 if "search_queries" not in session_state:
     session_state["search_queries"] = []
 
     # Search AI
     query=example_input
     if query:
+        result = search_arxiv(query)
+        #search_glossary(query)
+        search_glossary(result)
+        st.markdown(' ')
 
 st.write("Search history:")
 for example_input in session_state["search_queries"]:
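
The history loop above reads from the plain `session_state` dict defined earlier in the diff. For reference, a self-contained sketch of the same search-history pattern written against Streamlit's built-in `st.session_state` follows; the input label and the point where queries are appended are assumptions, not code from this commit.

```python
# Illustrative, self-contained search-history sketch using st.session_state.
import streamlit as st

if "search_queries" not in st.session_state:
    st.session_state["search_queries"] = []

query = st.text_input("Search ArXiv")  # label is an assumption
if query:
    st.session_state["search_queries"].append(query)

st.write("Search history:")
for past_query in st.session_state["search_queries"]:
    st.write(f"🔎 {past_query}")
```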