Files changed (47)
  1. .gitignore +0 -6
  2. README.md +1 -1
  3. app.py +404 -426
  4. climateqa/constants.py +1 -24
  5. climateqa/engine/chains/__init__.py +0 -0
  6. climateqa/engine/chains/answer_ai_impact.py +0 -46
  7. climateqa/engine/chains/answer_chitchat.py +0 -56
  8. climateqa/engine/chains/chitchat_categorization.py +0 -43
  9. climateqa/engine/chains/graph_retriever.py +0 -128
  10. climateqa/engine/chains/intent_categorization.py +0 -90
  11. climateqa/engine/chains/keywords_extraction.py +0 -40
  12. climateqa/engine/chains/query_transformation.py +0 -201
  13. climateqa/engine/chains/retrieve_documents.py +0 -310
  14. climateqa/engine/chains/retrieve_papers.py +0 -95
  15. climateqa/engine/chains/retriever.py +0 -126
  16. climateqa/engine/chains/sample_router.py +0 -66
  17. climateqa/engine/chains/set_defaults.py +0 -13
  18. climateqa/engine/chains/translation.py +0 -42
  19. climateqa/engine/embeddings.py +3 -6
  20. climateqa/engine/graph.py +0 -192
  21. climateqa/engine/graph_retriever.py +0 -88
  22. climateqa/engine/keywords.py +1 -3
  23. climateqa/engine/llm/__init__.py +0 -3
  24. climateqa/engine/llm/ollama.py +0 -6
  25. climateqa/engine/llm/openai.py +1 -1
  26. climateqa/engine/{chains/prompts.py → prompts.py} +3 -26
  27. climateqa/engine/{chains/answer_rag.py → rag.py} +58 -36
  28. climateqa/engine/{chains/reformulation.py → reformulation.py} +1 -1
  29. climateqa/engine/reranker.py +0 -50
  30. climateqa/engine/retriever.py +163 -0
  31. climateqa/engine/utils.py +0 -17
  32. climateqa/engine/vectorstore.py +2 -4
  33. climateqa/event_handler.py +0 -123
  34. climateqa/knowledge/__init__.py +0 -0
  35. climateqa/knowledge/retriever.py +0 -102
  36. climateqa/papers/__init__.py +43 -0
  37. climateqa/{knowledge → papers}/openalex.py +15 -68
  38. climateqa/utils.py +0 -13
  39. front/__init__.py +0 -0
  40. front/callbacks.py +0 -0
  41. front/utils.py +0 -341
  42. requirements.txt +6 -14
  43. sandbox/20240310 - CQA - Semantic Routing 1.ipynb +0 -0
  44. sandbox/20240702 - CQA - Graph Functionality.ipynb +0 -0
  45. sandbox/20241104 - CQA - StepByStep CQA.ipynb +0 -0
  46. style.css +66 -438
  47. test.json +0 -0
.gitignore CHANGED
@@ -5,9 +5,3 @@ __pycache__/utils.cpython-38.pyc
 
  notebooks/
  *.pyc
-
- **/.ipynb_checkpoints/
- **/.flashrank_cache/
-
- data/
- sandbox/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🌍
  colorFrom: blue
  colorTo: red
  sdk: gradio
- sdk_version: 5.0.2
+ sdk_version: 4.19.1
  app_file: app.py
  fullWidth: true
  pinned: false
app.py CHANGED
@@ -1,12 +1,13 @@
  from climateqa.engine.embeddings import get_embeddings_function
  embeddings_function = get_embeddings_function()
 
  from sentence_transformers import CrossEncoder
 
- # reranker = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")
 
  import gradio as gr
- from gradio_modal import Modal
  import pandas as pd
  import numpy as np
  import os
@@ -14,8 +15,6 @@ import time
  import re
  import json
 
- from gradio import ChatMessage
-
  # from gradio_modal import Modal
 
  from io import BytesIO
@@ -26,29 +25,20 @@ from azure.storage.fileshare import ShareServiceClient
 
  from utils import create_user_id
 
- from gradio_modal import Modal
-
- from PIL import Image
 
- from langchain_core.runnables.schema import StreamEvent
 
  # ClimateQ&A imports
  from climateqa.engine.llm import get_llm
  from climateqa.engine.vectorstore import get_pinecone_vectorstore
- # from climateqa.knowledge.retriever import ClimateQARetriever
- from climateqa.engine.reranker import get_reranker
  from climateqa.engine.embeddings import get_embeddings_function
- from climateqa.engine.chains.prompts import audience_prompts
  from climateqa.sample_questions import QUESTIONS
- from climateqa.constants import POSSIBLE_REPORTS, OWID_CATEGORIES
  from climateqa.utils import get_image_from_azure_blob_storage
- from climateqa.engine.graph import make_graph_agent
- from climateqa.engine.embeddings import get_embeddings_function
- from climateqa.engine.chains.retrieve_papers import find_papers
-
- from front.utils import serialize_docs,process_figures
-
- from climateqa.event_handler import init_audience, handle_retrieved_documents, stream_answer,handle_retrieved_owid_graphs
 
  # Load environment variables in local mode
  try:
@@ -57,8 +47,6 @@ try:
  except Exception as e:
  pass
 
- import requests
-
  # Set up Gradio Theme
  theme = gr.themes.Base(
  primary_hue="blue",
@@ -92,114 +80,134 @@ share_client = service.get_share_client(file_share_name)
  user_id = create_user_id()
 
 
- CITATION_LABEL = "BibTeX citation for ClimateQ&A"
- CITATION_TEXT = r"""@misc{climateqa,
- author={Théo Alves Da Costa, Timothée Bohe},
- title={ClimateQ&A, AI-powered conversational assistant for climate change and biodiversity loss},
- year={2024},
- howpublished= {\url{https://climateqa.com}},
- }
- @software{climateqa,
- author = {Théo Alves Da Costa, Timothée Bohe},
- publisher = {ClimateQ&A},
- title = {ClimateQ&A, AI-powered conversational assistant for climate change and biodiversity loss},
- }
- """
 
 
 
  # Create vectorstore and retriever
- vectorstore = get_pinecone_vectorstore(embeddings_function, index_name = os.getenv("PINECONE_API_INDEX"))
- vectorstore_graphs = get_pinecone_vectorstore(embeddings_function, index_name = os.getenv("PINECONE_API_INDEX_OWID"), text_key="description")
-
  llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
- reranker = get_reranker("large")
 
- agent = make_graph_agent(llm=llm, vectorstore_ipcc=vectorstore, vectorstore_graphs=vectorstore_graphs, reranker=reranker)
 
- def update_config_modal_visibility(config_open):
- new_config_visibility_status = not config_open
- return gr.update(visible=new_config_visibility_status), new_config_visibility_status
 
- async def chat(query, history, audience, sources, reports, relevant_content_sources, search_only):
  """taking a query and a message history, use a pipeline (reformulation, retriever, answering) to yield a tuple of:
  (messages in gradio format, messages in langchain format, source documents)"""
 
- date_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
- print(f">> NEW QUESTION ({date_now}) : {query}")
 
- audience_prompt = init_audience(audience)
 
  # Prepare default values
- if sources is None or len(sources) == 0:
- sources = ["IPCC", "IPBES", "IPOS"]
 
- if reports is None or len(reports) == 0:
  reports = []
 
- inputs = {"user_input": query,"audience": audience_prompt,"sources_input":sources, "relevant_content_sources" : relevant_content_sources, "search_only": search_only, "reports": reports}
- result = agent.astream_events(inputs,version = "v1")
 
143
 
144
- docs = []
145
- related_contents = []
146
  docs_html = ""
147
  output_query = ""
148
  output_language = ""
149
  output_keywords = ""
150
- start_streaming = False
151
- graphs_html = ""
152
- figures = '<div class="figures-container"><p></p> </div>'
153
-
154
- steps_display = {
155
- "categorize_intent":("🔄️ Analyzing user message",True),
156
- "transform_query":("🔄️ Thinking step by step to answer the question",True),
157
- "retrieve_documents":("🔄️ Searching in the knowledge base",False),
158
- }
159
-
160
- used_documents = []
161
- answer_message_content = ""
162
  try:
163
- async for event in result:
164
- if "langgraph_node" in event["metadata"]:
165
- node = event["metadata"]["langgraph_node"]
166
 
167
- if event["event"] == "on_chain_end" and event["name"] == "retrieve_documents" and event["data"]["output"] != None:# when documents are retrieved
168
- docs, docs_html, history, used_documents, related_contents = handle_retrieved_documents(event, history, used_documents)
169
-
170
- elif event["event"] == "on_chain_end" and node == "categorize_intent" and event["name"] == "_write": # when the query is transformed
171
-
172
- intent = event["data"]["output"]["intent"]
173
- if "language" in event["data"]["output"]:
174
- output_language = event["data"]["output"]["language"]
175
- else :
176
- output_language = "English"
177
- history[-1].content = f"Language identified : {output_language} \n Intent identified : {intent}"
178
-
179
-
180
- elif event["name"] in steps_display.keys() and event["event"] == "on_chain_start": #display steps
181
- event_description, display_output = steps_display[node]
182
- if not hasattr(history[-1], 'metadata') or history[-1].metadata["title"] != event_description: # if a new step begins
183
- history.append(ChatMessage(role="assistant", content = "", metadata={'title' :event_description}))
184
-
185
- elif event["name"] != "transform_query" and event["event"] == "on_chat_model_stream" and node in ["answer_rag", "answer_rag_no_docs","answer_search","answer_chitchat"]:# if streaming answer
186
- history, start_streaming, answer_message_content = stream_answer(history, event, start_streaming, answer_message_content)
187
-
188
- elif event["name"] in ["retrieve_graphs", "retrieve_graphs_ai"] and event["event"] == "on_chain_end":
189
- graphs_html = handle_retrieved_owid_graphs(event, graphs_html)
190
-
191
-
192
- if event["name"] == "transform_query" and event["event"] =="on_chain_end":
193
- if hasattr(history[-1],"content"):
194
- history[-1].content += "Decompose question into sub-questions: \n\n - " + "\n - ".join([q["question"] for q in event["data"]["output"]["remaining_questions"]])
195
-
196
- if event["name"] == "categorize_intent" and event["event"] == "on_chain_start":
197
- print("X")
198
 
199
- yield history, docs_html, output_query, output_language, related_contents , graphs_html, #,output_query,output_keywords
200
-
 
 
 
201
  except Exception as e:
202
- print(event, "has failed")
203
  raise gr.Error(f"{e}")
204
 
205
 
@@ -208,7 +216,7 @@ async def chat(query, history, audience, sources, reports, relevant_content_sour
208
  if os.getenv("GRADIO_ENV") != "local":
209
  timestamp = str(datetime.now().timestamp())
210
  file = timestamp + ".json"
211
- prompt = history[1]["content"]
212
  logs = {
213
  "user_id": str(user_id),
214
  "prompt": prompt,
@@ -216,7 +224,7 @@ async def chat(query, history, audience, sources, reports, relevant_content_sour
216
  "question":output_query,
217
  "sources":sources,
218
  "docs":serialize_docs(docs),
219
- "answer": history[-1].content,
220
  "time": timestamp,
221
  }
222
  log_on_azure(file, logs, share_client)
@@ -224,7 +232,119 @@ async def chat(query, history, audience, sources, reports, relevant_content_sour
224
  print(f"Error logging on Azure Blob Storage: {e}")
225
  raise gr.Error(f"ClimateQ&A Error: {str(e)[:100]} - The error has been noted, try another question and if the error remains, you can contact us :)")
226
 
227
- yield history, docs_html, output_query, output_language, related_contents, graphs_html
 
 
228
 
229
 
230
  def save_feedback(feed: str, user_id):
@@ -248,7 +368,77 @@ def log_on_azure(file, logs, share_client):
248
  file_client.upload_file(logs)
249
 
250
 
 
 
251
 
 
 
252
 
253
 
254
  # --------------------------------------------------------------------
@@ -263,15 +453,10 @@ Hello, I am ClimateQ&A, a conversational assistant designed to help you understa
263
  - **Language**: You can ask me your questions in any language.
264
  - **Audience**: You can specify your audience (children, general public, experts) to get a more adapted answer.
265
  - **Sources**: You can choose to search in the IPCC or IPBES reports, or both.
266
- - **Relevant content sources**: You can choose to search for figures, papers, or graphs that can be relevant for your question.
267
 
268
  ⚠️ Limitations
269
  *Please note that the AI is not perfect and may sometimes give irrelevant answers. If you are not satisfied with the answer, please ask a more specific question or report your feedback to help us improve the system.*
270
 
271
- 🛈 Information
272
- Please note that we log your questions for meta-analysis purposes, so avoid sharing any sensitive or personal information.
273
-
274
-
275
  What do you want to learn ?
276
  """
277
 
@@ -282,64 +467,35 @@ def vote(data: gr.LikeData):
282
  else:
283
  print(data)
284
 
285
- def save_graph(saved_graphs_state, embedding, category):
286
- print(f"\nCategory:\n{saved_graphs_state}\n")
287
- if category not in saved_graphs_state:
288
- saved_graphs_state[category] = []
289
- if embedding not in saved_graphs_state[category]:
290
- saved_graphs_state[category].append(embedding)
291
- return saved_graphs_state, gr.Button("Graph Saved")
292
 
293
 
 
 
294
 
295
- with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=theme,elem_id = "main-component") as demo:
296
- chat_completed_state = gr.State(0)
297
- current_graphs = gr.State([])
298
- saved_graphs = gr.State({})
299
- config_open = gr.State(False)
300
-
301
-
302
  with gr.Tab("ClimateQ&A"):
303
 
304
  with gr.Row(elem_id="chatbot-row"):
305
  with gr.Column(scale=2):
 
306
  chatbot = gr.Chatbot(
307
- value = [ChatMessage(role="assistant", content=init_prompt)],
308
- type = "messages",
309
- show_copy_button=True,
310
- show_label = False,
311
- elem_id="chatbot",
312
- layout = "panel",
313
  avatar_images = (None,"https://i.ibb.co/YNyd5W2/logo4.png"),
314
- max_height="80vh",
315
- height="100vh"
316
- )
317
 
318
  # bot.like(vote,None,None)
319
 
320
 
321
 
322
  with gr.Row(elem_id = "input-message"):
323
- textbox = gr.Textbox(
324
- placeholder="Ask me anything here!",
325
- show_label=False,
326
- scale=12,
327
- lines=1,
328
- interactive=True,
329
- elem_id="input-textbox"
330
- )
331
-
332
- config_button = gr.Button(
333
- "",
334
- elem_id="config-button"
335
- )
336
-
337
-
338
 
339
- with gr.Column(scale=2, variant="panel",elem_id = "right-panel"):
340
 
341
 
342
- with gr.Tabs(elem_id = "right_panel_tab") as tabs:
343
  with gr.TabItem("Examples",elem_id = "tab-examples",id = 0):
344
 
345
  examples_hidden = gr.Textbox(visible = False)
@@ -365,305 +521,102 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
365
  )
366
 
367
  samples.append(group_examples)
368
-
369
- # with gr.Tab("Configuration", id = 10, ) as tab_config:
370
- # # gr.Markdown("Reminders: You can talk in any language, ClimateQ&A is multi-lingual!")
371
 
372
- # pass
373
-
374
- # with gr.Row():
375
-
376
- # dropdown_sources = gr.CheckboxGroup(
377
- # ["IPCC", "IPBES","IPOS"],
378
- # label="Select source",
379
- # value=["IPCC"],
380
- # interactive=True,
381
- # )
382
- # dropdown_external_sources = gr.CheckboxGroup(
383
- # ["IPCC figures","OpenAlex", "OurWorldInData"],
384
- # label="Select database to search for relevant content",
385
- # value=["IPCC figures"],
386
- # interactive=True,
387
- # )
388
-
389
- # dropdown_reports = gr.Dropdown(
390
- # POSSIBLE_REPORTS,
391
- # label="Or select specific reports",
392
- # multiselect=True,
393
- # value=None,
394
- # interactive=True,
395
- # )
396
-
397
- # search_only = gr.Checkbox(label="Search only without chating", value=False, interactive=True, elem_id="checkbox-chat")
398
-
399
-
400
- # dropdown_audience = gr.Dropdown(
401
- # ["Children","General public","Experts"],
402
- # label="Select audience",
403
- # value="Experts",
404
- # interactive=True,
405
- # )
406
-
407
-
408
- # after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers", visible=False)
409
-
410
 
411
- # output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False, visible= False)
412
- # output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False, visible= False)
 
413
 
 
 
414
 
415
- # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after])
416
- # # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after], visible=True)
417
 
418
 
419
- with gr.Tab("Sources",elem_id = "tab-sources",id = 1) as tab_sources:
420
- sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
421
-
422
-
423
-
424
- with gr.Tab("Recommended content", elem_id="tab-recommended_content",id=2) as tab_recommended_content:
425
- with gr.Tabs(elem_id = "group-subtabs") as tabs_recommended_content:
426
-
427
- with gr.Tab("Figures",elem_id = "tab-figures",id = 3) as tab_figures:
428
- sources_raw = gr.State([])
429
- new_figures = gr.State([])
430
- used_figures = gr.State([])
431
-
432
- with Modal(visible=False, elem_id="modal_figure_galery") as figure_modal:
433
- gallery_component = gr.Gallery(object_fit='scale-down',elem_id="gallery-component", height="80vh")
434
-
435
- show_full_size_figures = gr.Button("Show figures in full size",elem_id="show-figures",interactive=True)
436
- show_full_size_figures.click(lambda : Modal(visible=True),None,figure_modal)
437
-
438
- figures_cards = gr.HTML(show_label=False, elem_id="sources-figures")
439
-
440
-
441
-
442
- with gr.Tab("Papers",elem_id = "tab-citations",id = 4) as tab_papers:
443
- # btn_summary = gr.Button("Summary")
444
- # Fenêtre simulée pour le Summary
445
- with gr.Accordion(visible=True, elem_id="papers-summary-popup", label= "See summary of relevant papers", open= False) as summary_popup:
446
- papers_summary = gr.Markdown("", visible=True, elem_id="papers-summary")
447
-
448
- # btn_relevant_papers = gr.Button("Relevant papers")
449
- # Fenêtre simulée pour les Relevant Papers
450
- with gr.Accordion(visible=True, elem_id="papers-relevant-popup",label= "See relevant papers", open= False) as relevant_popup:
451
- papers_html = gr.HTML(show_label=False, elem_id="papers-textbox")
452
-
453
- btn_citations_network = gr.Button("Explore papers citations network")
454
- # Fenêtre simulée pour le Citations Network
455
- with Modal(visible=False) as papers_modal:
456
- citations_network = gr.HTML("<h3>Citations Network Graph</h3>", visible=True, elem_id="papers-citations-network")
457
- btn_citations_network.click(lambda: Modal(visible=True), None, papers_modal)
458
-
459
-
460
-
461
- with gr.Tab("Graphs", elem_id="tab-graphs", id=5) as tab_graphs:
462
-
463
- graphs_container = gr.HTML("<h2>There are no graphs to be displayed at the moment. Try asking another question.</h2>",elem_id="graphs-container")
464
- current_graphs.change(lambda x : x, inputs=[current_graphs], outputs=[graphs_container])
465
-
466
- with Modal(visible=False,elem_id="modal-config") as config_modal:
467
- gr.Markdown("Reminders: You can talk in any language, ClimateQ&A is multi-lingual!")
468
-
469
-
470
- # with gr.Row():
471
-
472
- dropdown_sources = gr.CheckboxGroup(
473
- ["IPCC", "IPBES","IPOS"],
474
- label="Select source (by default search in all sources)",
475
- value=["IPCC"],
476
- interactive=True,
477
- )
478
-
479
- dropdown_reports = gr.Dropdown(
480
- POSSIBLE_REPORTS,
481
- label="Or select specific reports",
482
- multiselect=True,
483
- value=None,
484
- interactive=True,
485
- )
486
-
487
- dropdown_external_sources = gr.CheckboxGroup(
488
- ["Figures (IPCC/IPBES)","Papers (OpenAlex)", "Graphs (OurWorldInData)"],
489
- label="Select database to search for relevant content",
490
- value=["Figures (IPCC/IPBES)"],
491
- interactive=True,
492
- )
493
-
494
- search_only = gr.Checkbox(label="Search only for recommended content without chating", value=False, interactive=True, elem_id="checkbox-chat")
495
-
496
-
497
- dropdown_audience = gr.Dropdown(
498
- ["Children","General public","Experts"],
499
- label="Select audience",
500
- value="Experts",
501
- interactive=True,
502
- )
503
-
504
-
505
- after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers", visible=False)
506
-
507
 
508
- output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False, visible= False)
509
- output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False, visible= False)
510
 
511
 
512
- dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "Papers (OpenAlex)" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after])
513
-
514
- close_config_modal = gr.Button("Validate and Close",elem_id="close-config-modal")
515
- close_config_modal.click(fn=update_config_modal_visibility, inputs=[config_open], outputs=[config_modal, config_open])
516
- # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after], visible=True)
517
-
518
 
519
-
520
- config_button.click(fn=update_config_modal_visibility, inputs=[config_open], outputs=[config_modal, config_open])
521
-
522
- # with gr.Tab("OECD",elem_id = "tab-oecd",id = 6):
523
- # oecd_indicator = "RIVER_FLOOD_RP100_POP_SH"
524
- # oecd_topic = "climate"
525
- # oecd_latitude = "46.8332"
526
- # oecd_longitude = "5.3725"
527
- # oecd_zoom = "5.6442"
528
- # # Create the HTML content with the iframe
529
- # iframe_html = f"""
530
- # <iframe src="https://localdataportal.oecd.org/maps.html?indicator={oecd_indicator}&topic={oecd_topic}&latitude={oecd_latitude}&longitude={oecd_longitude}&zoom={oecd_zoom}"
531
- # width="100%" height="600" frameborder="0" style="border:0;" allowfullscreen></iframe>
532
- # """
533
- # oecd_textbox = gr.HTML(iframe_html, show_label=False, elem_id="oecd-textbox")
534
-
535
-
536
 
537
 
538
  #---------------------------------------------------------------------------------------
539
  # OTHER TABS
540
  #---------------------------------------------------------------------------------------
541
 
542
- # with gr.Tab("Settings",elem_id = "tab-config",id = 2):
543
 
544
- # gr.Markdown("Reminder: You can talk in any language, ClimateQ&A is multi-lingual!")
 
545
 
 
546
 
547
- # dropdown_sources = gr.CheckboxGroup(
548
- # ["IPCC", "IPBES","IPOS", "OpenAlex"],
549
- # label="Select source",
550
- # value=["IPCC"],
551
- # interactive=True,
552
- # )
553
 
554
- # dropdown_reports = gr.Dropdown(
555
- # POSSIBLE_REPORTS,
556
- # label="Or select specific reports",
557
- # multiselect=True,
558
- # value=None,
559
- # interactive=True,
560
- # )
561
 
562
- # dropdown_audience = gr.Dropdown(
563
- # ["Children","General public","Experts"],
564
- # label="Select audience",
565
- # value="Experts",
566
- # interactive=True,
567
- # )
568
 
 
 
569
 
570
- # output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False)
571
- # output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False)
572
 
573
 
 
574
  with gr.Tab("About",elem_classes = "max-height other-tabs"):
575
  with gr.Row():
576
  with gr.Column(scale=1):
 
577
 
578
 
579
-
580
-
581
- gr.Markdown(
582
- """
583
- ### More info
584
- - See more info at [https://climateqa.com](https://climateqa.com/docs/intro/)
585
- - Feedbacks on this [form](https://forms.office.com/e/1Yzgxm6jbp)
586
-
587
- ### Citation
588
- """
589
- )
590
- with gr.Accordion(CITATION_LABEL,elem_id="citation", open = False,):
591
- # # Display citation label and text)
592
- gr.Textbox(
593
- value=CITATION_TEXT,
594
- label="",
595
- interactive=False,
596
- show_copy_button=True,
597
- lines=len(CITATION_TEXT.split('\n')),
598
- )
599
-
600
-
601
-
602
- def start_chat(query,history,search_only):
603
- history = history + [ChatMessage(role="user", content=query)]
604
- if not search_only:
605
- return (gr.update(interactive = False),gr.update(selected=1),history, [])
606
- else:
607
- return (gr.update(interactive = False),gr.update(selected=2),history, [])
608
 
609
  def finish_chat():
610
- return gr.update(interactive = True,value = "")
611
-
612
- # Initialize visibility states
613
- summary_visible = False
614
- relevant_visible = False
615
-
616
- # Functions to toggle visibility
617
- def toggle_summary_visibility():
618
- global summary_visible
619
- summary_visible = not summary_visible
620
- return gr.update(visible=summary_visible)
621
 
622
- def toggle_relevant_visibility():
623
- global relevant_visible
624
- relevant_visible = not relevant_visible
625
- return gr.update(visible=relevant_visible)
626
-
627
-
628
- def change_completion_status(current_state):
629
- current_state = 1 - current_state
630
- return current_state
631
-
632
- def update_sources_number_display(sources_textbox, figures_cards, current_graphs, papers_html):
633
- sources_number = sources_textbox.count("<h2>")
634
- figures_number = figures_cards.count("<h2>")
635
- graphs_number = current_graphs.count("<iframe")
636
- papers_number = papers_html.count("<h2>")
637
- sources_notif_label = f"Sources ({sources_number})"
638
- figures_notif_label = f"Figures ({figures_number})"
639
- graphs_notif_label = f"Graphs ({graphs_number})"
640
- papers_notif_label = f"Papers ({papers_number})"
641
- recommended_content_notif_label = f"Recommended content ({figures_number + graphs_number + papers_number})"
642
-
643
- return gr.update(label = recommended_content_notif_label), gr.update(label = sources_notif_label), gr.update(label = figures_notif_label), gr.update(label = graphs_notif_label), gr.update(label = papers_notif_label)
644
-
645
  (textbox
646
- .submit(start_chat, [textbox, chatbot, search_only],
647
- [textbox, tabs, chatbot, sources_raw],
648
- queue=False,
649
- api_name="start_chat_textbox")
650
- .then(chat, [textbox, chatbot, dropdown_audience, dropdown_sources,
651
- dropdown_reports, dropdown_external_sources, search_only],
652
- [chatbot, sources_textbox, output_query, output_language,
653
- new_figures, current_graphs],
654
- concurrency_limit=8,
655
- api_name="chat_textbox")
656
- .then(finish_chat, None, [textbox],
657
- api_name="finish_chat_textbox")
658
  )
659
 
660
-
661
-
662
  (examples_hidden
663
- .change(start_chat, [examples_hidden,chatbot, search_only], [textbox,tabs,chatbot, sources_raw],queue = False,api_name = "start_chat_examples")
664
- .then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, dropdown_external_sources, search_only] ,[chatbot,sources_textbox,output_query,output_language, new_figures, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
665
  .then(finish_chat, None, [textbox],api_name = "finish_chat_examples")
666
- # .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_sources, tab_figures, tab_graphs, tab_papers] )
667
  )
668
 
669
 
@@ -674,26 +627,51 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
674
  return [gr.update(visible=visible_bools[i]) for i in range(len(samples))]
675
 
676
 
677
- new_figures.change(process_figures, inputs=[sources_raw, new_figures], outputs=[sources_raw, figures_cards, gallery_component])
678
-
679
- # update sources numbers
680
- sources_textbox.change(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_recommended_content, tab_sources, tab_figures, tab_graphs, tab_papers])
681
- figures_cards.change(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_recommended_content, tab_sources, tab_figures, tab_graphs, tab_papers])
682
- current_graphs.change(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_recommended_content, tab_sources, tab_figures, tab_graphs, tab_papers])
683
- papers_html.change(update_sources_number_display, [sources_textbox, figures_cards, current_graphs,papers_html],[tab_recommended_content, tab_sources, tab_figures, tab_graphs, tab_papers])
684
 
685
- # other questions examples
686
  dropdown_samples.change(change_sample_questions,dropdown_samples,samples)
687
 
688
- # search for papers
689
- textbox.submit(find_papers,[textbox,after, dropdown_external_sources], [papers_html,citations_network,papers_summary])
690
- examples_hidden.change(find_papers,[examples_hidden,after,dropdown_external_sources], [papers_html,citations_network,papers_summary])
 
 
 
691
 
692
- # btn_summary.click(toggle_summary_visibility, outputs=summary_popup)
693
- # btn_relevant_papers.click(toggle_relevant_visibility, outputs=relevant_popup)
694
 
695
- demo.queue()
 
 
696
 
697
 
 
 
 
 
698
 
699
- demo.launch(ssr_mode=False)
 
1
  from climateqa.engine.embeddings import get_embeddings_function
2
  embeddings_function = get_embeddings_function()
3
 
4
+ from climateqa.papers.openalex import OpenAlex
5
  from sentence_transformers import CrossEncoder
6
 
7
+ reranker = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")
8
+ oa = OpenAlex()
9
 
10
  import gradio as gr
 
11
  import pandas as pd
12
  import numpy as np
13
  import os
 
15
  import re
16
  import json
17
 
 
 
18
  # from gradio_modal import Modal
19
 
20
  from io import BytesIO
 
25
 
26
  from utils import create_user_id
27
 
 
 
 
28
 
 
29
 
30
  # ClimateQ&A imports
31
  from climateqa.engine.llm import get_llm
32
+ from climateqa.engine.rag import make_rag_chain
33
  from climateqa.engine.vectorstore import get_pinecone_vectorstore
34
+ from climateqa.engine.retriever import ClimateQARetriever
 
35
  from climateqa.engine.embeddings import get_embeddings_function
36
+ from climateqa.engine.prompts import audience_prompts
37
  from climateqa.sample_questions import QUESTIONS
38
+ from climateqa.constants import POSSIBLE_REPORTS
39
  from climateqa.utils import get_image_from_azure_blob_storage
40
+ from climateqa.engine.keywords import make_keywords_chain
41
+ from climateqa.engine.rag import make_rag_papers_chain
 
 
 
 
 
42
 
43
  # Load environment variables in local mode
44
  try:
 
47
  except Exception as e:
48
  pass
49
 
 
 
50
  # Set up Gradio Theme
51
  theme = gr.themes.Base(
52
  primary_hue="blue",
 
80
  user_id = create_user_id()
81
 
82
 
 
 
 
83
 
84
+ def parse_output_llm_with_sources(output):
85
+ # Split the content into a list of text and "[Doc X]" references
86
+ content_parts = re.split(r'\[(Doc\s?\d+(?:,\s?Doc\s?\d+)*)\]', output)
87
+ parts = []
88
+ for part in content_parts:
89
+ if part.startswith("Doc"):
90
+ subparts = part.split(",")
91
+ subparts = [subpart.lower().replace("doc","").strip() for subpart in subparts]
92
+ subparts = [f"""<a href="#doc{subpart}" class="a-doc-ref" target="_self"><span class='doc-ref'><sup>{subpart}</sup></span></a>""" for subpart in subparts]
93
+ parts.append("".join(subparts))
94
+ else:
95
+ parts.append(part)
96
+ content_parts = "".join(parts)
97
+ return content_parts
98
 
99
 
100
  # Create vectorstore and retriever
101
+ vectorstore = get_pinecone_vectorstore(embeddings_function)
 
 
102
  llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
 
103
 
 
104
 
105
+ def make_pairs(lst):
106
+ """from a list of even lenght, make tupple pairs"""
107
+ return [(lst[i], lst[i + 1]) for i in range(0, len(lst), 2)]
108
+
109
 
110
+ def serialize_docs(docs):
111
+ new_docs = []
112
+ for doc in docs:
113
+ new_doc = {}
114
+ new_doc["page_content"] = doc.page_content
115
+ new_doc["metadata"] = doc.metadata
116
+ new_docs.append(new_doc)
117
+ return new_docs
118
+
119
+
120
+
121
+ async def chat(query,history,audience,sources,reports):
122
  """taking a query and a message history, use a pipeline (reformulation, retriever, answering) to yield a tuple of:
123
  (messages in gradio format, messages in langchain format, source documents)"""
124
 
125
+ print(f">> NEW QUESTION : {query}")
 
126
 
127
+ if audience == "Children":
128
+ audience_prompt = audience_prompts["children"]
129
+ elif audience == "General public":
130
+ audience_prompt = audience_prompts["general"]
131
+ elif audience == "Experts":
132
+ audience_prompt = audience_prompts["experts"]
133
+ else:
134
+ audience_prompt = audience_prompts["experts"]
135
 
136
  # Prepare default values
137
+ if len(sources) == 0:
138
+ sources = ["IPCC"]
139
 
140
+ if len(reports) == 0:
141
  reports = []
142
+
143
+ retriever = ClimateQARetriever(vectorstore=vectorstore,sources = sources,min_size = 200,reports = reports,k_summary = 3,k_total = 15,threshold=0.5)
144
+ rag_chain = make_rag_chain(retriever,llm)
145
 
146
+ inputs = {"query": query,"audience": audience_prompt}
147
+ result = rag_chain.astream_log(inputs) #{"callbacks":[MyCustomAsyncHandler()]})
148
+ # result = rag_chain.stream(inputs)
149
 
150
+ path_reformulation = "/logs/reformulation/final_output"
151
+ path_keywords = "/logs/keywords/final_output"
152
+ path_retriever = "/logs/find_documents/final_output"
153
+ path_answer = "/logs/answer/streamed_output_str/-"
154
 
 
 
155
  docs_html = ""
156
  output_query = ""
157
  output_language = ""
158
  output_keywords = ""
159
+ gallery = []
160
+
 
161
  try:
162
+ async for op in result:
 
 
163
 
164
+ op = op.ops[0]
165
+
166
+ if op['path'] == path_reformulation: # reforulated question
167
+ try:
168
+ output_language = op['value']["language"] # str
169
+ output_query = op["value"]["question"]
170
+ except Exception as e:
171
+ raise gr.Error(f"ClimateQ&A Error: {e} - The error has been noted, try another question and if the error remains, you can contact us :)")
172
+
173
+ if op["path"] == path_keywords:
174
+ try:
175
+ output_keywords = op['value']["keywords"] # str
176
+ output_keywords = " AND ".join(output_keywords)
177
+ except Exception as e:
178
+ pass
 
 
179
 
180
+
181
+ elif op['path'] == path_retriever: # documents
182
+ try:
183
+ docs = op['value']['docs'] # List[Document]
184
+ docs_html = []
185
+ for i, d in enumerate(docs, 1):
186
+ docs_html.append(make_html_source(d, i))
187
+ docs_html = "".join(docs_html)
188
+ except TypeError:
189
+ print("No documents found")
190
+ print("op: ",op)
191
+ continue
192
+
193
+ elif op['path'] == path_answer: # final answer
194
+ new_token = op['value'] # str
195
+ # time.sleep(0.01)
196
+ previous_answer = history[-1][1]
197
+ previous_answer = previous_answer if previous_answer is not None else ""
198
+ answer_yet = previous_answer + new_token
199
+ answer_yet = parse_output_llm_with_sources(answer_yet)
200
+ history[-1] = (query,answer_yet)
201
+
202
+
203
+
204
+ else:
205
+ continue
206
+
207
+ history = [tuple(x) for x in history]
208
+ yield history,docs_html,output_query,output_language,gallery,output_query,output_keywords
209
+
210
  except Exception as e:
 
211
  raise gr.Error(f"{e}")
212
 
213
 
 
216
  if os.getenv("GRADIO_ENV") != "local":
217
  timestamp = str(datetime.now().timestamp())
218
  file = timestamp + ".json"
219
+ prompt = history[-1][0]
220
  logs = {
221
  "user_id": str(user_id),
222
  "prompt": prompt,
 
224
  "question":output_query,
225
  "sources":sources,
226
  "docs":serialize_docs(docs),
227
+ "answer": history[-1][1],
228
  "time": timestamp,
229
  }
230
  log_on_azure(file, logs, share_client)
 
232
  print(f"Error logging on Azure Blob Storage: {e}")
233
  raise gr.Error(f"ClimateQ&A Error: {str(e)[:100]} - The error has been noted, try another question and if the error remains, you can contact us :)")
234
 
235
+ image_dict = {}
236
+ for i,doc in enumerate(docs):
237
+
238
+ if doc.metadata["chunk_type"] == "image":
239
+ try:
240
+ key = f"Image {i+1}"
241
+ image_path = doc.metadata["image_path"].split("documents/")[1]
242
+ img = get_image_from_azure_blob_storage(image_path)
243
+
244
+ # Convert the image to a byte buffer
245
+ buffered = BytesIO()
246
+ img.save(buffered, format="PNG")
247
+ img_str = base64.b64encode(buffered.getvalue()).decode()
248
+
249
+ # Embedding the base64 string in Markdown
250
+ markdown_image = f"![Alt text](data:image/png;base64,{img_str})"
251
+ image_dict[key] = {"img":img,"md":markdown_image,"caption":doc.page_content,"key":key,"figure_code":doc.metadata["figure_code"]}
252
+ except Exception as e:
253
+ print(f"Skipped adding image {i} because of {e}")
254
+
255
+ if len(image_dict) > 0:
256
+
257
+ gallery = [x["img"] for x in list(image_dict.values())]
258
+ img = list(image_dict.values())[0]
259
+ img_md = img["md"]
260
+ img_caption = img["caption"]
261
+ img_code = img["figure_code"]
262
+ if img_code != "N/A":
263
+ img_name = f"{img['key']} - {img['figure_code']}"
264
+ else:
265
+ img_name = f"{img['key']}"
266
+
267
+ answer_yet = history[-1][1] + f"\n\n{img_md}\n<p class='chatbot-caption'><b>{img_name}</b> - {img_caption}</p>"
268
+ history[-1] = (history[-1][0],answer_yet)
269
+ history = [tuple(x) for x in history]
270
+
271
+ # gallery = [x.metadata["image_path"] for x in docs if (len(x.metadata["image_path"]) > 0 and "IAS" in x.metadata["image_path"])]
272
+ # if len(gallery) > 0:
273
+ # gallery = list(set("|".join(gallery).split("|")))
274
+ # gallery = [get_image_from_azure_blob_storage(x) for x in gallery]
275
+
276
+ yield history,docs_html,output_query,output_language,gallery,output_query,output_keywords
277
+
278
+
279
+ def make_html_source(source,i):
280
+ meta = source.metadata
281
+ # content = source.page_content.split(":",1)[1].strip()
282
+ content = source.page_content.strip()
283
+
284
+ toc_levels = []
285
+ for j in range(2):
286
+ level = meta[f"toc_level{j}"]
287
+ if level != "N/A":
288
+ toc_levels.append(level)
289
+ else:
290
+ break
291
+ toc_levels = " > ".join(toc_levels)
292
+
293
+ if len(toc_levels) > 0:
294
+ name = f"<b>{toc_levels}</b><br/>{meta['name']}"
295
+ else:
296
+ name = meta['name']
297
+
298
+ if meta["chunk_type"] == "text":
299
+
300
+ card = f"""
301
+ <div class="card" id="doc{i}">
302
+ <div class="card-content">
303
+ <h2>Doc {i} - {meta['short_name']} - Page {int(meta['page_number'])}</h2>
304
+ <p>{content}</p>
305
+ </div>
306
+ <div class="card-footer">
307
+ <span>{name}</span>
308
+ <a href="{meta['url']}#page={int(meta['page_number'])}" target="_blank" class="pdf-link">
309
+ <span role="img" aria-label="Open PDF">🔗</span>
310
+ </a>
311
+ </div>
312
+ </div>
313
+ """
314
+
315
+ else:
316
+
317
+ if meta["figure_code"] != "N/A":
318
+ title = f"{meta['figure_code']} - {meta['short_name']}"
319
+ else:
320
+ title = f"{meta['short_name']}"
321
+
322
+ card = f"""
323
+ <div class="card card-image">
324
+ <div class="card-content">
325
+ <h2>Image {i} - {title} - Page {int(meta['page_number'])}</h2>
326
+ <p>{content}</p>
327
+ <p class='ai-generated'>AI-generated description</p>
328
+ </div>
329
+ <div class="card-footer">
330
+ <span>{name}</span>
331
+ <a href="{meta['url']}#page={int(meta['page_number'])}" target="_blank" class="pdf-link">
332
+ <span role="img" aria-label="Open PDF">🔗</span>
333
+ </a>
334
+ </div>
335
+ </div>
336
+ """
337
+
338
+ return card
339
+
340
+
341
+
342
+ # else:
343
+ # docs_string = "No relevant passages found in the climate science reports (IPCC and IPBES)"
344
+ # complete_response = "**No relevant passages found in the climate science reports (IPCC and IPBES), you may want to ask a more specific question (specifying your question on climate issues).**"
345
+ # messages.append({"role": "assistant", "content": complete_response})
346
+ # gradio_format = make_pairs([a["content"] for a in messages[1:]])
347
+ # yield gradio_format, messages, docs_string
348
 
349
 
350
  def save_feedback(feed: str, user_id):
 
368
  file_client.upload_file(logs)
369
 
370
 
371
+ def generate_keywords(query):
372
+ chain = make_keywords_chain(llm)
373
+ keywords = chain.invoke(query)
374
+ keywords = " AND ".join(keywords["keywords"])
375
+ return keywords
376
+
377
+
378
+
379
+ papers_cols_widths = {
380
+ "doc":50,
381
+ "id":100,
382
+ "title":300,
383
+ "doi":100,
384
+ "publication_year":100,
385
+ "abstract":500,
386
+ "rerank_score":100,
387
+ "is_oa":50,
388
+ }
389
+
390
+ papers_cols = list(papers_cols_widths.keys())
391
+ papers_cols_widths = list(papers_cols_widths.values())
392
+
393
+ async def find_papers(query, keywords,after):
394
+
395
+ summary = ""
396
+
397
+ df_works = oa.search(keywords,after = after)
398
+ df_works = df_works.dropna(subset=["abstract"])
399
+ df_works = oa.rerank(query,df_works,reranker)
400
+ df_works = df_works.sort_values("rerank_score",ascending=False)
401
+ G = oa.make_network(df_works)
402
+
403
+ height = "750px"
404
+ network = oa.show_network(G,color_by = "rerank_score",notebook=False,height = height)
405
+ network_html = network.generate_html()
406
+
407
+ network_html = network_html.replace("'", "\"")
408
+ css_to_inject = "<style>#mynetwork { border: none !important; } .card { border: none !important; }</style>"
409
+ network_html = network_html + css_to_inject
410
+
411
+
412
+ network_html = f"""<iframe style="width: 100%; height: {height};margin:0 auto" name="result" allow="midi; geolocation; microphone; camera;
413
+ display-capture; encrypted-media;" sandbox="allow-modals allow-forms
414
+ allow-scripts allow-same-origin allow-popups
415
+ allow-top-navigation-by-user-activation allow-downloads" allowfullscreen=""
416
+ allowpaymentrequest="" frameborder="0" srcdoc='{network_html}'></iframe>"""
417
+
418
+
419
+ docs = df_works["content"].head(15).tolist()
420
+
421
+ df_works = df_works.reset_index(drop = True).reset_index().rename(columns = {"index":"doc"})
422
+ df_works["doc"] = df_works["doc"] + 1
423
+ df_works = df_works[papers_cols]
424
+
425
+ yield df_works,network_html,summary
426
+
427
+ chain = make_rag_papers_chain(llm)
428
+ result = chain.astream_log({"question": query,"docs": docs,"language":"English"})
429
+ path_answer = "/logs/StrOutputParser/streamed_output/-"
430
 
431
+ async for op in result:
432
+
433
+ op = op.ops[0]
434
+
435
+ if op['path'] == path_answer: # reforulated question
436
+ new_token = op['value'] # str
437
+ summary += new_token
438
+ else:
439
+ continue
440
+ yield df_works,network_html,summary
441
+
442
 
443
 
444
  # --------------------------------------------------------------------
 
453
  - **Language**: You can ask me your questions in any language.
454
  - **Audience**: You can specify your audience (children, general public, experts) to get a more adapted answer.
455
  - **Sources**: You can choose to search in the IPCC or IPBES reports, or both.
 
456
 
457
  ⚠️ Limitations
458
  *Please note that the AI is not perfect and may sometimes give irrelevant answers. If you are not satisfied with the answer, please ask a more specific question or report your feedback to help us improve the system.*
459
 
 
 
 
 
460
  What do you want to learn ?
461
  """
462
 
 
467
  else:
468
  print(data)
469
 
 
 
 
 
 
 
 
470
 
471
 
472
+ with gr.Blocks(title="Climate Q&A", css="style.css", theme=theme,elem_id = "main-component") as demo:
473
+ # user_id_state = gr.State([user_id])
474
 
 
 
475
  with gr.Tab("ClimateQ&A"):
476
 
477
  with gr.Row(elem_id="chatbot-row"):
478
  with gr.Column(scale=2):
479
+ # state = gr.State([system_template])
480
  chatbot = gr.Chatbot(
481
+ value=[(None,init_prompt)],
482
+ show_copy_button=True,show_label = False,elem_id="chatbot",layout = "panel",
 
 
 
 
483
  avatar_images = (None,"https://i.ibb.co/YNyd5W2/logo4.png"),
484
+ )#,avatar_images = ("assets/logo4.png",None))
 
 
485
 
486
  # bot.like(vote,None,None)
487
 
488
 
489
 
490
  with gr.Row(elem_id = "input-message"):
491
+ textbox=gr.Textbox(placeholder="Ask me anything here!",show_label=False,scale=7,lines = 1,interactive = True,elem_id="input-textbox")
492
+ # submit = gr.Button("",elem_id = "submit-button",scale = 1,interactive = True,icon = "https://static-00.iconduck.com/assets.00/settings-icon-2048x2046-cw28eevx.png")
493
+
 
 
494
 
495
+ with gr.Column(scale=1, variant="panel",elem_id = "right-panel"):
496
 
497
 
498
+ with gr.Tabs() as tabs:
499
  with gr.TabItem("Examples",elem_id = "tab-examples",id = 0):
500
 
501
  examples_hidden = gr.Textbox(visible = False)
 
521
  )
522
 
523
  samples.append(group_examples)
 
 
 
524
 
 
 
525
 
526
+ with gr.Tab("Sources",elem_id = "tab-citations",id = 1):
527
+ sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
528
+ docs_textbox = gr.State("")
529
 
530
+ # with Modal(visible = False) as config_modal:
531
+ with gr.Tab("Configuration",elem_id = "tab-config",id = 2):
532
 
533
+ gr.Markdown("Reminder: You can talk in any language, ClimateQ&A is multi-lingual!")
 
534
 
535
 
536
+ dropdown_sources = gr.CheckboxGroup(
537
+ ["IPCC", "IPBES","IPOS"],
538
+ label="Select source",
539
+ value=["IPCC"],
540
+ interactive=True,
541
+ )
542
+
543
+ dropdown_reports = gr.Dropdown(
544
+ POSSIBLE_REPORTS,
545
+ label="Or select specific reports",
546
+ multiselect=True,
547
+ value=None,
548
+ interactive=True,
549
+ )
550
+
551
+ dropdown_audience = gr.Dropdown(
552
+ ["Children","General public","Experts"],
553
+ label="Select audience",
554
+ value="Experts",
555
+ interactive=True,
556
+ )
557
+
558
+ output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False)
559
+ output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False)
 
 
560
 
 
 
561
 
562
 
 
 
 
 
 
 
563
 
 
 
564
 
565
 
566
  #---------------------------------------------------------------------------------------
567
  # OTHER TABS
568
  #---------------------------------------------------------------------------------------
569
 
 
570
 
571
+ with gr.Tab("Figures",elem_id = "tab-images",elem_classes = "max-height other-tabs"):
572
+ gallery_component = gr.Gallery()
573
 
574
+ with gr.Tab("Papers (beta)",elem_id = "tab-papers",elem_classes = "max-height other-tabs"):
575
 
576
+ with gr.Row():
577
+ with gr.Column(scale=1):
578
+ query_papers = gr.Textbox(placeholder="Question",show_label=False,lines = 1,interactive = True,elem_id="query-papers")
579
+ keywords_papers = gr.Textbox(placeholder="Keywords",show_label=False,lines = 1,interactive = True,elem_id="keywords-papers")
580
+ after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers")
581
+ search_papers = gr.Button("Search",elem_id="search-papers",interactive=True)
582
 
583
+ with gr.Column(scale=7):
 
 
 
 
 
 
584
 
585
+ with gr.Tab("Summary",elem_id="papers-summary-tab"):
586
+ papers_summary = gr.Markdown(visible=True,elem_id="papers-summary")
 
 
 
 
587
 
588
+ with gr.Tab("Relevant papers",elem_id="papers-results-tab"):
589
+ papers_dataframe = gr.Dataframe(visible=True,elem_id="papers-table",headers = papers_cols)
590
 
591
+ with gr.Tab("Citations network",elem_id="papers-network-tab"):
592
+ citations_network = gr.HTML(visible=True,elem_id="papers-citations-network")
593
 
594
 
595
+
596
  with gr.Tab("About",elem_classes = "max-height other-tabs"):
597
  with gr.Row():
598
  with gr.Column(scale=1):
599
+ gr.Markdown("See more info at [https://climateqa.com](https://climateqa.com/docs/intro/)")
600
 
601
 
602
+ def start_chat(query,history):
603
+ history = history + [(query,None)]
604
+ history = [tuple(x) for x in history]
605
+ return (gr.update(interactive = False),gr.update(selected=1),history)
 
 
606
 
607
  def finish_chat():
608
+ return (gr.update(interactive = True,value = ""))
 
 
609
 
 
 
610
  (textbox
611
+ .submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_textbox")
612
+ .then(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery_component,query_papers,keywords_papers],concurrency_limit = 8,api_name = "chat_textbox")
613
+ .then(finish_chat, None, [textbox],api_name = "finish_chat_textbox")
 
 
614
  )
615
 
 
 
616
  (examples_hidden
617
+ .change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_examples")
618
+ .then(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery_component,query_papers,keywords_papers],concurrency_limit = 8,api_name = "chat_examples")
619
  .then(finish_chat, None, [textbox],api_name = "finish_chat_examples")
 
620
  )
621
 
622
 
 
627
  return [gr.update(visible=visible_bools[i]) for i in range(len(samples))]
628
 
629
 
 
 
630
 
 
631
  dropdown_samples.change(change_sample_questions,dropdown_samples,samples)
632
 
633
+ query_papers.submit(generate_keywords,[query_papers], [keywords_papers])
634
+ search_papers.click(find_papers,[query_papers,keywords_papers,after], [papers_dataframe,citations_network,papers_summary])
635
+
636
+ # # textbox.submit(predict_climateqa,[textbox,bot],[None,bot,sources_textbox])
637
+ # (textbox
638
+ # .submit(answer_user, [textbox,examples_hidden, bot], [textbox, bot],queue = False)
639
+ # .success(change_tab,None,tabs)
640
+ # .success(fetch_sources,[textbox,dropdown_sources], [textbox,sources_textbox,docs_textbox,output_query,output_language])
641
+ # .success(answer_bot, [textbox,bot,docs_textbox,output_query,output_language,dropdown_audience], [textbox,bot],queue = True)
642
+ # .success(lambda x : textbox,[textbox],[textbox])
643
+ # )
644
+
645
+ # (examples_hidden
646
+ # .change(answer_user_example, [textbox,examples_hidden, bot], [textbox, bot],queue = False)
647
+ # .success(change_tab,None,tabs)
648
+ # .success(fetch_sources,[textbox,dropdown_sources], [textbox,sources_textbox,docs_textbox,output_query,output_language])
649
+ # .success(answer_bot, [textbox,bot,docs_textbox,output_query,output_language,dropdown_audience], [textbox,bot],queue=True)
650
+ # .success(lambda x : textbox,[textbox],[textbox])
651
+ # )
652
+ # submit_button.click(answer_user, [textbox, bot], [textbox, bot], queue=True).then(
653
+ # answer_bot, [textbox,bot,dropdown_audience,dropdown_sources], [textbox,bot,sources_textbox]
654
+ # )
655
 
 
 
656
 
657
+ # with Modal(visible=True) as first_modal:
658
+ # gr.Markdown("# Welcome to ClimateQ&A !")
659
+
660
+ # gr.Markdown("### Examples")
661
+
662
+ # examples = gr.Examples(
663
+ # ["Yo ça roule","ça boume"],
664
+ # [examples_hidden],
665
+ # examples_per_page=8,
666
+ # run_on_click=False,
667
+ # elem_id="examples",
668
+ # api_name="examples",
669
+ # )
670
 
671
 
672
+ # submit.click(lambda: Modal(visible=True), None, config_modal)
673
+
674
+
675
+ demo.queue()
676
 
677
+ demo.launch()
climateqa/constants.py CHANGED
@@ -1,6 +1,4 @@
  POSSIBLE_REPORTS = [
- "IPBES IABWFH SPM",
- "IPBES CBL SPM",
  "IPCC AR6 WGI SPM",
  "IPCC AR6 WGI FR",
  "IPCC AR6 WGI TS",
@@ -44,25 +42,4 @@ POSSIBLE_REPORTS = [
  "IPBES IAS A C5",
  "IPBES IAS A C6",
  "IPBES IAS A SPM"
- ]
-
- OWID_CATEGORIES = ['Access to Energy', 'Agricultural Production',
- 'Agricultural Regulation & Policy', 'Air Pollution',
- 'Animal Welfare', 'Antibiotics', 'Biodiversity', 'Biofuels',
- 'Biological & Chemical Weapons', 'CO2 & Greenhouse Gas Emissions',
- 'COVID-19', 'Clean Water', 'Clean Water & Sanitation',
- 'Climate Change', 'Crop Yields', 'Diet Compositions',
- 'Electricity', 'Electricity Mix', 'Energy', 'Energy Efficiency',
- 'Energy Prices', 'Environmental Impacts of Food Production',
- 'Environmental Protection & Regulation', 'Famines', 'Farm Size',
- 'Fertilizers', 'Fish & Overfishing', 'Food Supply', 'Food Trade',
- 'Food Waste', 'Food and Agriculture', 'Forests & Deforestation',
- 'Fossil Fuels', 'Future Population Growth',
- 'Hunger & Undernourishment', 'Indoor Air Pollution', 'Land Use',
- 'Land Use & Yields in Agriculture', 'Lead Pollution',
- 'Meat & Dairy Production', 'Metals & Minerals',
- 'Natural Disasters', 'Nuclear Energy', 'Nuclear Weapons',
- 'Oil Spills', 'Outdoor Air Pollution', 'Ozone Layer', 'Pandemics',
- 'Pesticides', 'Plastic Pollution', 'Renewable Energy', 'Soil',
- 'Transport', 'Urbanization', 'Waste Management', 'Water Pollution',
- 'Water Use & Stress', 'Wildfires']
+ ]
climateqa/engine/chains/__init__.py DELETED
File without changes
climateqa/engine/chains/answer_ai_impact.py DELETED
@@ -1,46 +0,0 @@
- from langchain.prompts import ChatPromptTemplate
- from langchain_core.output_parsers import StrOutputParser
-
-
- prompt_template = """
- You are ClimateQ&A, an helpful AI Assistant specialized in answering climate-related questions using info from the IPCC and/or IPBES reports.
- Always stay true to climate and nature science and do not make up information.
- If you do not know the answer, just say you do not know.
-
- ## Guidelines
- - Explain that the environmental impact of AI is not covered by the IPCC or IPBES reports, but you can recommend info based on the sources below
- - Answer the question in the original language of the question
-
- ## Sources
- - You can propose to visit this page https://climateqa.com/docs/carbon-footprint/ to learn more about ClimateQ&A's own carbon footprint
- - You can recommend to look at the work of the AI & climate expert scientist Sasha Luccioni with in in particular those papers
- - Power Hungry Processing: Watts Driving the Cost of AI Deployment? - https://arxiv.org/abs/2311.16863 - about the carbon footprint at the inference stage of AI models
- - Counting Carbon: A Survey of Factors Influencing the Emissions of Machine Learning - https://arxiv.org/abs/2302.08476
- - Estimating the Carbon Footprint of BLOOM, a 176B Parameter Language Model - https://arxiv.org/abs/2211.02001 - about the carbon footprint of training a large language model
- - You can also recommend the following tools to calculate the carbon footprint of AI models
- - CodeCarbon - https://github.com/mlco2/codecarbon to measure the carbon footprint of your code
- - Ecologits - https://ecologits.ai/ to measure the carbon footprint of using LLMs APIs such
- """
-
-
- def make_ai_impact_chain(llm):
-
- prompt = ChatPromptTemplate.from_messages([
- ("system", prompt_template),
- ("user", "{question}")
- ])
-
- chain = prompt | llm | StrOutputParser()
- chain = chain.with_config({"run_name":"ai_impact_chain"})
-
- return chain
-
- def make_ai_impact_node(llm):
-
- ai_impact_chain = make_ai_impact_chain(llm)
-
- async def answer_ai_impact(state,config):
- answer = await ai_impact_chain.ainvoke({"question":state["user_input"]},config)
- return {"answer":answer}
-
- return answer_ai_impact
climateqa/engine/chains/answer_chitchat.py DELETED
@@ -1,56 +0,0 @@
1
- from langchain.prompts import ChatPromptTemplate
2
- from langchain_core.output_parsers import StrOutputParser
3
-
4
-
5
- chitchat_prompt_template = """
6
- You are ClimateQ&A, an helpful AI Assistant specialized in answering climate-related questions using info from the IPCC and/or IPBES reports.
7
- Always stay true to climate and nature science and do not make up information.
8
- If you do not know the answer, just say you do not know.
9
-
10
- ## Guidelines
11
- - If it's a conversational question, you can normally chat with the user
12
- - If the question is not related to any topic about the environment, refuse to answer and politely ask the user to ask another question about the environment
13
- - If the user ask if you speak any language, you can say you speak all languages :)
14
- - If the user ask about the bot itself "ClimateQ&A", you can explain that you are an AI assistant specialized in answering climate-related questions using info from the IPCC and/or IPBES reports and propose to visit the website here https://climateqa.com/docs/intro/ for more information
15
- - If the question is about ESG regulations, standards, or frameworks like the CSRD, TCFD, SASB, GRI, CDP, etc., you can explain that this is not a topic covered by the IPCC or IPBES reports.
16
- - Precise that you are specialized in finding trustworthy information from the scientific reports of the IPCC and IPBES and other scientific litterature
17
- - If relevant you can propose up to 3 example of questions they could ask from the IPCC or IPBES reports from the examples below
18
- - Always answer in the original language of the question
19
-
20
- ## Examples of questions you can suggest (in the original language of the question)
21
- "What evidence do we have of climate change?",
22
- "Are human activities causing global warming?",
23
- "What are the impacts of climate change?",
24
- "Can climate change be reversed?",
25
- "What is the difference between climate change and global warming?",
26
- """
27
-
28
-
29
- def make_chitchat_chain(llm):
30
-
31
- prompt = ChatPromptTemplate.from_messages([
32
- ("system", chitchat_prompt_template),
33
- ("user", "{question}")
34
- ])
35
-
36
- chain = prompt | llm | StrOutputParser()
37
- chain = chain.with_config({"run_name":"chitchat_chain"})
38
-
39
- return chain
40
-
41
-
42
-
43
- def make_chitchat_node(llm):
44
-
45
- chitchat_chain = make_chitchat_chain(llm)
46
-
47
- async def answer_chitchat(state,config):
48
- print("---- Answer chitchat ----")
49
-
50
- answer = await chitchat_chain.ainvoke({"question":state["user_input"]},config)
51
- state["answer"] = answer
52
- return state
53
- # return {"answer":answer}
54
-
55
- return answer_chitchat
56
-
 
climateqa/engine/chains/chitchat_categorization.py DELETED
@@ -1,43 +0,0 @@
1
-
2
- from langchain_core.pydantic_v1 import BaseModel, Field
3
- from typing import List
4
- from typing import Literal
5
- from langchain.prompts import ChatPromptTemplate
6
- from langchain_core.utils.function_calling import convert_to_openai_function
7
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
8
-
9
-
10
- class IntentCategorizer(BaseModel):
11
- """Analyzing the user message input"""
12
-
13
- environment: bool = Field(
14
- description="Return 'True' if the question relates to climate change, the environment, nature, etc. (Example: should I eat fish?). Return 'False' if the question is just chit chat or not related to the environment or climate change.",
15
- )
16
-
17
-
18
- def make_chitchat_intent_categorization_chain(llm):
19
-
20
- openai_functions = [convert_to_openai_function(IntentCategorizer)]
21
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"IntentCategorizer"})
22
-
23
- prompt = ChatPromptTemplate.from_messages([
24
- ("system", "You are a helpful assistant, you will analyze, translate and reformulate the user input message using the function provided"),
25
- ("user", "input: {input}")
26
- ])
27
-
28
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
29
- return chain
30
-
31
-
32
- def make_chitchat_intent_categorization_node(llm):
33
-
34
- categorization_chain = make_chitchat_intent_categorization_chain(llm)
35
-
36
- def categorize_message(state):
37
- output = categorization_chain.invoke({"input": state["user_input"]})
38
- print(f"\n\nChit chat output intent categorization: {output}\n")
39
- state["search_graphs_chitchat"] = output["environment"]
40
- print(f"\n\nChit chat output intent categorization: {state}\n")
41
- return state
42
-
43
- return categorize_message
 
climateqa/engine/chains/graph_retriever.py DELETED
@@ -1,128 +0,0 @@
1
- import sys
2
- import os
3
- from contextlib import contextmanager
4
-
5
- from ..reranker import rerank_docs
6
- from ..graph_retriever import retrieve_graphs # GraphRetriever
7
- from ...utils import remove_duplicates_keep_highest_score
8
-
9
-
10
- def divide_into_parts(target, parts):
11
- # Base value for each part
12
- base = target // parts
13
- # Remainder to distribute
14
- remainder = target % parts
15
- # List to hold the result
16
- result = []
17
-
18
- for i in range(parts):
19
- if i < remainder:
20
- # These parts get base value + 1
21
- result.append(base + 1)
22
- else:
23
- # The rest get the base value
24
- result.append(base)
25
-
26
- return result
27
-
28
-
29
- @contextmanager
30
- def suppress_output():
31
- # Open a null device
32
- with open(os.devnull, 'w') as devnull:
33
- # Store the original stdout and stderr
34
- old_stdout = sys.stdout
35
- old_stderr = sys.stderr
36
- # Redirect stdout and stderr to the null device
37
- sys.stdout = devnull
38
- sys.stderr = devnull
39
- try:
40
- yield
41
- finally:
42
- # Restore stdout and stderr
43
- sys.stdout = old_stdout
44
- sys.stderr = old_stderr
45
-
46
-
47
- def make_graph_retriever_node(vectorstore, reranker, rerank_by_question=True, k_final=15, k_before_reranking=100):
48
-
49
- async def node_retrieve_graphs(state):
50
- print("---- Retrieving graphs ----")
51
-
52
- POSSIBLE_SOURCES = ["IEA", "OWID"]
53
- questions = state["remaining_questions"] if state["remaining_questions"] is not None and state["remaining_questions"]!=[] else [state["query"]]
54
- # sources_input = state["sources_input"]
55
- sources_input = ["auto"]
56
-
57
- auto_mode = "auto" in sources_input
58
-
59
- # There are several options to get the final top k
60
- # Option 1 - Get 100 documents by question and rerank by question
61
- # Option 2 - Get 100/n documents by question and rerank the total
62
- if rerank_by_question:
63
- k_by_question = divide_into_parts(k_final,len(questions))
64
-
65
- docs = []
66
-
67
- for i,q in enumerate(questions):
68
-
69
- question = q["question"] if isinstance(q, dict) else q
70
-
71
- print(f"Subquestion {i}: {question}")
72
-
73
- # If auto mode, we use all sources
74
- if auto_mode:
75
- sources = POSSIBLE_SOURCES
76
- # Otherwise, we use the config
77
- else:
78
- sources = sources_input
79
-
80
- if any([x in POSSIBLE_SOURCES for x in sources]):
81
-
82
- sources = [x for x in sources if x in POSSIBLE_SOURCES]
83
-
84
- # Search the document store using the retriever
85
- docs_question = await retrieve_graphs(
86
- query = question,
87
- vectorstore = vectorstore,
88
- sources = sources,
89
- k_total = k_before_reranking,
90
- threshold = 0.5,
91
- )
92
- # docs_question = retriever.get_relevant_documents(question)
93
-
94
- # Rerank
95
- if reranker is not None and docs_question!=[]:
96
- with suppress_output():
97
- docs_question = rerank_docs(reranker,docs_question,question)
98
- else:
99
- # Add a default reranking score
100
- for doc in docs_question:
101
- doc.metadata["reranking_score"] = doc.metadata["similarity_score"]
102
-
103
- # If rerank by question we select the top documents for each question
104
- if rerank_by_question:
105
- docs_question = docs_question[:k_by_question[i]]
106
-
107
- # Add sources used in the metadata
108
- for doc in docs_question:
109
- doc.metadata["sources_used"] = sources
110
-
111
- print(f"{len(docs_question)} graphs retrieved for subquestion {i + 1}: {docs_question}")
112
-
113
- docs.extend(docs_question)
114
-
115
- else:
116
- print(f"There are no graphs which match the sources filtered on. Sources filtered on: {sources}. Sources available: {POSSIBLE_SOURCES}.")
117
-
118
- # Remove duplicates and keep the duplicate document with the highest reranking score
119
- docs = remove_duplicates_keep_highest_score(docs)
120
-
121
- # Sorting the list in descending order by rerank_score
122
- # Then select the top k
123
- docs = sorted(docs, key=lambda x: x.metadata["reranking_score"], reverse=True)
124
- docs = docs[:k_final]
125
-
126
- return {"recommended_content": docs}
127
-
128
- return node_retrieve_graphs
 
climateqa/engine/chains/intent_categorization.py DELETED
@@ -1,90 +0,0 @@
1
-
2
- from langchain_core.pydantic_v1 import BaseModel, Field
3
- from typing import List
4
- from typing import Literal
5
- from langchain.prompts import ChatPromptTemplate
6
- from langchain_core.utils.function_calling import convert_to_openai_function
7
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
8
-
9
-
10
- class IntentCategorizer(BaseModel):
11
- """Analyzing the user message input"""
12
-
13
- language: str = Field(
14
- description="Find the language of the message input in full words (ex: French, English, Spanish, ...), defaults to English",
15
- default="English",
16
- )
17
- intent: str = Field(
18
- enum=[
19
- "ai_impact",
20
- # "geo_info",
21
- # "esg",
22
- "search",
23
- "chitchat",
24
- ],
25
- description="""
26
- Categorize the user input in one of the following category
27
- Any question
28
-
29
- Examples:
30
- - ai_impact = Environmental impacts of AI: "What are the environmental impacts of AI", "How does AI affect the environment"
31
- - search = Searching for any quesiton about climate change, energy, biodiversity, nature, and everything we can find the IPCC or IPBES reports or scientific papers,
32
- - chitchat = Any general question that is not related to the environment or climate change or just conversational, or if you don't think searching the IPCC or IPBES reports would be relevant
33
- """,
34
- # - geo_info = Geolocated info about climate change: Any question where the user wants to know localized impacts of climate change, eg: "What will be the temperature in Marseille in 2050"
35
- # - esg = Any question about the ESG regulation, frameworks and standards like the CSRD, TCFD, SASB, GRI, CDP, etc.
36
-
37
- )
38
-
39
-
40
-
41
- def make_intent_categorization_chain(llm):
42
-
43
- openai_functions = [convert_to_openai_function(IntentCategorizer)]
44
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"IntentCategorizer"})
45
-
46
- prompt = ChatPromptTemplate.from_messages([
47
- ("system", "You are a helpful assistant, you will analyze, translate and categorize the user input message using the function provided. Categorize the user input as ai ONLY if it is related to Artificial Intelligence, search if it is related to the environment, climate change, energy, biodiversity, nature, etc. and chitchat if it is just general conversation."),
48
- ("user", "input: {input}")
49
- ])
50
-
51
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
52
- return chain
53
-
54
-
55
- def make_intent_categorization_node(llm):
56
-
57
- categorization_chain = make_intent_categorization_chain(llm)
58
-
59
- def categorize_message(state):
60
- print("---- Categorize_message ----")
61
-
62
- output = categorization_chain.invoke({"input": state["user_input"]})
63
- print(f"\n\nOutput intent categorization: {output}\n")
64
- if "language" not in output: output["language"] = "English"
65
- output["query"] = state["user_input"]
66
- return output
67
-
68
- return categorize_message
69
-
70
-
71
-
72
-
73
- # SAMPLE_QUESTIONS = [
74
- # "Est-ce que l'IA a un impact sur l'environnement ?",
75
- # "Que dit le GIEC sur l'impact de l'IA",
76
- # "Qui sont les membres du GIEC",
77
- # "What is the impact of El Nino ?",
78
- # "Yo",
79
- # "Hello ça va bien ?",
80
- # "Par qui as tu été créé ?",
81
- # "What role do cloud formations play in modulating the Earth's radiative balance, and how are they represented in current climate models?",
82
- # "Which industries have the highest GHG emissions?",
83
- # "What are invasive alien species and how do they threaten biodiversity and ecosystems?",
84
- # "Are human activities causing global warming?",
85
- # "What is the motivation behind mining the deep seabed?",
86
- # "Tu peux m'écrire un poème sur le changement climatique ?",
87
- # "Tu peux m'écrire un poème sur les bonbons ?",
88
- # "What will be the temperature in 2100 in Strasbourg?",
89
- # "C'est quoi le lien entre biodiversity and changement climatique ?",
90
- # ]
 
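Note (illustrative, not part of the diff): the categorize_message node above returns the parsed function-call output plus the original query. A minimal sketch of the resulting dictionary, with example values:

```python
# Shape of the state update produced by categorize_message (values are examples).
# "language" falls back to "English" when the model omits it; "query" is copied
# from state["user_input"].
example_output = {
    "intent": "search",
    "language": "French",
    "query": "Quels sont les impacts du changement climatique ?",
}
```
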
climateqa/engine/chains/keywords_extraction.py DELETED
@@ -1,40 +0,0 @@
1
-
2
- from langchain_core.pydantic_v1 import BaseModel, Field
3
- from typing import List
4
- from typing import Literal
5
- from langchain.prompts import ChatPromptTemplate
6
- from langchain_core.utils.function_calling import convert_to_openai_function
7
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
8
-
9
-
10
- class KeywordExtraction(BaseModel):
11
- """
12
- Analyzing the user query to extract keywords to feed a search engine
13
- """
14
-
15
- keywords: List[str] = Field(
16
- description="""
17
- Extract the keywords from the user query to feed a search engine as a list
18
- Avoid adding super specific keywords to prefer general keywords
19
- Maximum 3 keywords
20
-
21
- Examples:
22
- - "What is the impact of deep sea mining ?" -> ["deep sea mining"]
23
- - "How will El Nino be impacted by climate change" -> ["el nino","climate change"]
24
- - "Is climate change a hoax" -> ["climate change","hoax"]
25
- """
26
- )
27
-
28
-
29
- def make_keywords_extraction_chain(llm):
30
-
31
- openai_functions = [convert_to_openai_function(KeywordExtraction)]
32
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"KeywordExtraction"})
33
-
34
- prompt = ChatPromptTemplate.from_messages([
35
- ("system", "You are a helpful assistant"),
36
- ("user", "input: {input}")
37
- ])
38
-
39
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
40
- return chain
 
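Note (illustrative sketch, not taken from this diff): how the keyword-extraction chain above was typically wired up before this PR, reusing the repository's own get_llm helper (also used in retrieve_papers.py below). The provider and settings are assumptions, not part of this change.

```python
from climateqa.engine.llm import get_llm
from climateqa.engine.chains.keywords_extraction import make_keywords_extraction_chain

# Assumed LLM configuration; any function-calling-capable chat model would do.
llm = get_llm(provider="openai", max_tokens=1024, temperature=0.0)
chain = make_keywords_extraction_chain(llm)

# The JsonOutputFunctionsParser returns the parsed arguments as a dict,
# e.g. {"keywords": ["deep sea mining"]}
print(chain.invoke({"input": "What is the impact of deep sea mining?"}))
```
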
climateqa/engine/chains/query_transformation.py DELETED
@@ -1,201 +0,0 @@
1
-
2
-
3
- from langchain_core.pydantic_v1 import BaseModel, Field
4
- from typing import List
5
- from typing import Literal
6
- from langchain.prompts import ChatPromptTemplate
7
- from langchain_core.utils.function_calling import convert_to_openai_function
8
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
9
-
10
-
11
- ROUTING_INDEX = {
12
- "Vector":["IPCC","IPBES","IPOS"],
13
- "OpenAlex":["OpenAlex"],
14
- }
15
-
16
- POSSIBLE_SOURCES = [y for values in ROUTING_INDEX.values() for y in values]
17
-
18
- # Prompt from the original paper https://arxiv.org/pdf/2305.14283
19
- # Query Rewriting for Retrieval-Augmented Large Language Models
20
- class QueryDecomposition(BaseModel):
21
- """
22
- Decompose the user query into smaller parts to think step by step to answer this question
23
- Act as a simple planning agent
24
- """
25
-
26
- questions: List[str] = Field(
27
- description="""
28
- Think step by step to answer this question, and provide one or several search engine questions in English for knowledge that you need.
29
- Suppose that the user is looking for information about climate change, energy, biodiversity, nature, and everything we can find the IPCC reports and scientific literature
30
- - If it's already a standalone and explicit question, just return the reformulated question for the search engine
31
- - If you need to decompose the question, output a list of maximum 2 to 3 questions
32
- """
33
- )
34
-
35
-
36
- class Location(BaseModel):
37
- country:str = Field(...,description="The country if directly mentioned or inferred from the location (cities, regions, adresses), ex: France, USA, ...")
38
- location:str = Field(...,description="The specific place if mentioned (cities, regions, addresses), ex: Marseille, New York, Wisconsin, ...")
39
-
40
- class QueryAnalysis(BaseModel):
41
- """
42
- Analyzing the user query to extract topics, sources and date
43
- Also do query expansion to get alternative search queries
44
- Also provide simple keywords to feed a search engine
45
- """
46
-
47
- # keywords: List[str] = Field(
48
- # description="""
49
- # Extract the keywords from the user query to feed a search engine as a list
50
- # Maximum 3 keywords
51
-
52
- # Examples:
53
- # - "What is the impact of deep sea mining ?" -> deep sea mining
54
- # - "How will El Nino be impacted by climate change" -> el nino;climate change
55
- # - "Is climate change a hoax" -> climate change;hoax
56
- # """
57
- # )
58
-
59
- # alternative_queries: List[str] = Field(
60
- # description="""
61
- # Generate alternative search questions from the user query to feed a search engine
62
- # """
63
- # )
64
-
65
- # step_back_question: str = Field(
66
- # description="""
67
- # You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer.
68
- # This questions should help you get more context and information about the user query
69
- # """
70
- # )
71
-
72
- sources: List[Literal["IPCC", "IPBES", "IPOS"]] = Field( #,"OpenAlex"]] = Field(
73
- ...,
74
- description="""
75
- Given a user question choose which documents would be most relevant for answering their question,
76
- - IPCC is for questions about climate change, energy, impacts, and everything we can find the IPCC reports
77
- - IPBES is for questions about biodiversity and nature
78
- - IPOS is for questions about the ocean and deep sea mining
79
- """,
80
- # - OpenAlex is for any other questions that are not in the previous categories but could be found in the scientific litterature
81
- )
82
- # topics: List[Literal[
83
- # "Climate change",
84
- # "Biodiversity",
85
- # "Energy",
86
- # "Decarbonization",
87
- # "Climate science",
88
- # "Nature",
89
- # "Climate policy and justice",
90
- # "Oceans",
91
- # "Deep sea mining",
92
- # "ESG and regulations",
93
- # "CSRD",
94
- # ]] = Field(
95
- # ...,
96
- # description = """
97
- # Choose the topics that are most relevant to the user query, ex: Climate change, Energy, Biodiversity, ...
98
- # """,
99
- # )
100
- # date: str = Field(description="The date or period mentioned, ex: 2050, between 2020 and 2050")
101
- # location:Location
102
-
103
-
104
- def make_query_decomposition_chain(llm):
105
-
106
- openai_functions = [convert_to_openai_function(QueryDecomposition)]
107
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"QueryDecomposition"})
108
-
109
- prompt = ChatPromptTemplate.from_messages([
110
- ("system", "You are a helpful assistant, you will analyze, translate and reformulate the user input message using the function provided"),
111
- ("user", "input: {input}")
112
- ])
113
-
114
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
115
- return chain
116
-
117
-
118
- def make_query_rewriter_chain(llm):
119
-
120
- openai_functions = [convert_to_openai_function(QueryAnalysis)]
121
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"QueryAnalysis"})
122
-
123
-
124
-
125
- prompt = ChatPromptTemplate.from_messages([
126
- ("system", "You are a helpful assistant, you will analyze, translate and reformulate the user input message using the function provided"),
127
- ("user", "input: {input}")
128
- ])
129
-
130
-
131
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
132
- return chain
133
-
134
-
135
- def make_query_transform_node(llm,k_final=15):
136
-
137
- decomposition_chain = make_query_decomposition_chain(llm)
138
- rewriter_chain = make_query_rewriter_chain(llm)
139
-
140
- def transform_query(state):
141
- print("---- Transform query ----")
142
-
143
-
144
- if "sources_auto" not in state or state["sources_auto"] is None or state["sources_auto"] is False:
145
- auto_mode = False
146
- else:
147
- auto_mode = True
148
-
149
- sources_input = state.get("sources_input")
150
- if sources_input is None: sources_input = ROUTING_INDEX["Vector"]
151
-
152
- new_state = {}
153
-
154
- # Decomposition
155
- decomposition_output = decomposition_chain.invoke({"input":state["query"]})
156
- new_state.update(decomposition_output)
157
-
158
- # Query Analysis
159
- questions = []
160
- for question in new_state["questions"]:
161
- question_state = {"question":question}
162
- analysis_output = rewriter_chain.invoke({"input":question})
163
-
164
- # TODO WARNING llm should always return smthg
165
- # The case when the llm does not return any sources
166
- if not analysis_output["sources"] or not all(source in ["IPCC", "IPBS", "IPOS"] for source in analysis_output["sources"]):
167
- analysis_output["sources"] = ["IPCC", "IPBES", "IPOS"]
168
-
169
- question_state.update(analysis_output)
170
- questions.append(question_state)
171
-
172
- # Explode the questions into multiple questions with different sources
173
- new_questions = []
174
- for q in questions:
175
- question,sources = q["question"],q["sources"]
176
-
177
- # If not auto mode we take the configuration
178
- if not auto_mode:
179
- sources = sources_input
180
-
181
- for index,index_sources in ROUTING_INDEX.items():
182
- selected_sources = list(set(sources).intersection(index_sources))
183
- if len(selected_sources) > 0:
184
- new_questions.append({"question":question,"sources":selected_sources,"index":index})
185
-
186
- # # Add the number of questions to search
187
- # k_by_question = k_final // len(new_questions)
188
- # for q in new_questions:
189
- # q["k"] = k_by_question
190
-
191
- # new_state["questions"] = new_questions
192
- # new_state["remaining_questions"] = new_questions
193
-
194
-
195
- new_state = {
196
- "remaining_questions":new_questions,
197
- "n_questions":len(new_questions),
198
- }
199
- return new_state
200
-
201
- return transform_query
 
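Note (illustrative, reconstructed from the code above): the transform_query node returns only remaining_questions and n_questions, with each sub-question routed to an index. A sketch of that shape, with invented values:

```python
# State update returned by transform_query (example values only).
example_new_state = {
    "remaining_questions": [
        {
            "question": "What are the projected impacts of sea level rise?",
            "sources": ["IPCC"],
            "index": "Vector",
        },
        {
            "question": "How does climate change affect biodiversity?",
            "sources": ["IPBES"],
            "index": "Vector",
        },
    ],
    "n_questions": 2,
}
```
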
climateqa/engine/chains/retrieve_documents.py DELETED
@@ -1,310 +0,0 @@
1
- import sys
2
- import os
3
- from contextlib import contextmanager
4
-
5
- from langchain_core.tools import tool
6
- from langchain_core.runnables import chain
7
- from langchain_core.runnables import RunnableParallel, RunnablePassthrough
8
- from langchain_core.runnables import RunnableLambda
9
-
10
- from ..reranker import rerank_docs
11
- # from ...knowledge.retriever import ClimateQARetriever
12
- from ...knowledge.openalex import OpenAlexRetriever
13
- from .keywords_extraction import make_keywords_extraction_chain
14
- from ..utils import log_event
15
- from langchain_core.vectorstores import VectorStore
16
- from typing import List
17
- from langchain_core.documents.base import Document
18
-
19
-
20
-
21
- def divide_into_parts(target, parts):
22
- # Base value for each part
23
- base = target // parts
24
- # Remainder to distribute
25
- remainder = target % parts
26
- # List to hold the result
27
- result = []
28
-
29
- for i in range(parts):
30
- if i < remainder:
31
- # These parts get base value + 1
32
- result.append(base + 1)
33
- else:
34
- # The rest get the base value
35
- result.append(base)
36
-
37
- return result
38
-
39
-
40
- @contextmanager
41
- def suppress_output():
42
- # Open a null device
43
- with open(os.devnull, 'w') as devnull:
44
- # Store the original stdout and stderr
45
- old_stdout = sys.stdout
46
- old_stderr = sys.stderr
47
- # Redirect stdout and stderr to the null device
48
- sys.stdout = devnull
49
- sys.stderr = devnull
50
- try:
51
- yield
52
- finally:
53
- # Restore stdout and stderr
54
- sys.stdout = old_stdout
55
- sys.stderr = old_stderr
56
-
57
-
58
- @tool
59
- def query_retriever(question):
60
- """Just a dummy tool to simulate the retriever query"""
61
- return question
62
-
63
- def _add_sources_used_in_metadata(docs,sources,question,index):
64
- for doc in docs:
65
- doc.metadata["sources_used"] = sources
66
- doc.metadata["question_used"] = question
67
- doc.metadata["index_used"] = index
68
- return docs
69
-
70
- def _get_k_summary_by_question(n_questions):
71
- if n_questions == 0:
72
- return 0
73
- elif n_questions == 1:
74
- return 5
75
- elif n_questions == 2:
76
- return 3
77
- elif n_questions == 3:
78
- return 2
79
- else:
80
- return 1
81
-
82
- def _get_k_images_by_question(n_questions):
83
- if n_questions == 0:
84
- return 0
85
- elif n_questions == 1:
86
- return 7
87
- elif n_questions == 2:
88
- return 5
89
- elif n_questions == 3:
90
- return 3
91
- else:
92
- return 1
93
-
94
- def _add_metadata_and_score(docs: List) -> Document:
95
- # Add score to metadata
96
- docs_with_metadata = []
97
- for i,(doc,score) in enumerate(docs):
98
- doc.page_content = doc.page_content.replace("\r\n"," ")
99
- doc.metadata["similarity_score"] = score
100
- doc.metadata["content"] = doc.page_content
101
- if doc.metadata["page_number"] != "N/A":
102
- doc.metadata["page_number"] = int(doc.metadata["page_number"]) + 1
103
- else:
104
- doc.metadata["page_number"] = 1
105
- # doc.page_content = f"""Doc {i+1} - {doc.metadata['short_name']}: {doc.page_content}"""
106
- docs_with_metadata.append(doc)
107
- return docs_with_metadata
108
-
109
- async def get_IPCC_relevant_documents(
110
- query: str,
111
- vectorstore:VectorStore,
112
- sources:list = ["IPCC","IPBES","IPOS"],
113
- search_figures:bool = False,
114
- reports:list = [],
115
- threshold:float = 0.6,
116
- k_summary:int = 3,
117
- k_total:int = 10,
118
- k_images: int = 5,
119
- namespace:str = "vectors",
120
- min_size:int = 200,
121
- search_only:bool = False,
122
- ) :
123
-
124
- # Check if all elements in the list are either IPCC or IPBES
125
- assert isinstance(sources,list)
126
- assert sources
127
- assert all([x in ["IPCC","IPBES","IPOS"] for x in sources])
128
- assert k_total > k_summary, "k_total should be greater than k_summary"
129
-
130
- # Prepare base search kwargs
131
- filters = {}
132
-
133
- if len(reports) > 0:
134
- filters["short_name"] = {"$in":reports}
135
- else:
136
- filters["source"] = { "$in": sources}
137
-
138
- # INIT
139
- docs_summaries = []
140
- docs_full = []
141
- docs_images = []
142
-
143
- if search_only:
144
- # Only search for images if search_only is True
145
- if search_figures:
146
- filters_image = {
147
- **filters,
148
- "chunk_type":"image"
149
- }
150
- docs_images = vectorstore.similarity_search_with_score(query=query,filter = filters_image,k = k_images)
151
- docs_images = _add_metadata_and_score(docs_images)
152
- else:
153
- # Regular search flow for text and optionally images
154
- # Search for k_summary documents in the summaries dataset
155
- filters_summaries = {
156
- **filters,
157
- "chunk_type":"text",
158
- "report_type": { "$in":["SPM"]},
159
- }
160
-
161
- docs_summaries = vectorstore.similarity_search_with_score(query=query,filter = filters_summaries,k = k_summary)
162
- docs_summaries = [x for x in docs_summaries if x[1] > threshold]
163
-
164
- # Search for k_total - k_summary documents in the full reports dataset
165
- filters_full = {
166
- **filters,
167
- "chunk_type":"text",
168
- "report_type": { "$nin":["SPM"]},
169
- }
170
- k_full = k_total - len(docs_summaries)
171
- docs_full = vectorstore.similarity_search_with_score(query=query,filter = filters_full,k = k_full)
172
-
173
- if search_figures:
174
- # Images
175
- filters_image = {
176
- **filters,
177
- "chunk_type":"image"
178
- }
179
- docs_images = vectorstore.similarity_search_with_score(query=query,filter = filters_image,k = k_images)
180
-
181
- docs_summaries, docs_full, docs_images = _add_metadata_and_score(docs_summaries), _add_metadata_and_score(docs_full), _add_metadata_and_score(docs_images)
182
-
183
- # Filter if length are below threshold
184
- docs_summaries = [x for x in docs_summaries if len(x.page_content) > min_size]
185
- docs_full = [x for x in docs_full if len(x.page_content) > min_size]
186
-
187
- return {
188
- "docs_summaries" : docs_summaries,
189
- "docs_full" : docs_full,
190
- "docs_images" : docs_images,
191
- }
192
-
193
-
194
-
195
- # The chain callback is not necessary, but it propagates the langchain callbacks to the astream_events logger to display intermediate results
196
- # @chain
197
- async def retrieve_documents(state,config, vectorstore,reranker,llm,rerank_by_question=True, k_final=15, k_before_reranking=100, k_summary=5, k_images=5):
198
- """
199
- Retrieve and rerank documents based on the current question in the state.
200
-
201
- Args:
202
- state (dict): The current state containing documents, related content, relevant content sources, remaining questions and n_questions.
203
- config (dict): Configuration settings for logging and other purposes.
204
- vectorstore (object): The vector store used to retrieve relevant documents.
205
- reranker (object): The reranker used to rerank the retrieved documents.
206
- llm (object): The language model used for processing.
207
- rerank_by_question (bool, optional): Whether to rerank documents by question. Defaults to True.
208
- k_final (int, optional): The final number of documents to retrieve. Defaults to 15.
209
- k_before_reranking (int, optional): The number of documents to retrieve before reranking. Defaults to 100.
210
- k_summary (int, optional): The number of summary documents to retrieve. Defaults to 5.
211
- k_images (int, optional): The number of image documents to retrieve. Defaults to 5.
212
- Returns:
213
- dict: The updated state containing the retrieved and reranked documents, related content, and remaining questions.
214
- """
215
- print("---- Retrieve documents ----")
216
-
217
- # Get the documents from the state
218
- if "documents" in state and state["documents"] is not None:
219
- docs = state["documents"]
220
- else:
221
- docs = []
222
-
223
- # Get the related_content from the state
224
- if "related_content" in state and state["related_content"] is not None:
225
- related_content = state["related_content"]
226
- else:
227
- related_content = []
228
-
229
- search_figures = "Figures (IPCC/IPBES)" in state["relevant_content_sources"]
230
- search_only = state["search_only"]
231
-
232
- reports = state["reports"]
233
-
234
- # Get the current question
235
- current_question = state["remaining_questions"][0]
236
- remaining_questions = state["remaining_questions"][1:]
237
-
238
- k_by_question = k_final // state["n_questions"]
239
- k_summary_by_question = _get_k_summary_by_question(state["n_questions"])
240
- k_images_by_question = _get_k_images_by_question(state["n_questions"])
241
-
242
- sources = current_question["sources"]
243
- question = current_question["question"]
244
- index = current_question["index"]
245
-
246
- print(f"Retrieve documents for question: {question}")
247
- await log_event({"question":question,"sources":sources,"index":index},"log_retriever",config)
248
-
249
-
250
- if index == "Vector": # always true for now
251
- docs_question_dict = await get_IPCC_relevant_documents(
252
- query = question,
253
- vectorstore=vectorstore,
254
- search_figures = search_figures,
255
- sources = sources,
256
- min_size = 200,
257
- k_summary = k_summary_by_question,
258
- k_total = k_before_reranking,
259
- k_images = k_images_by_question,
260
- threshold = 0.5,
261
- search_only = search_only,
262
- reports = reports,
263
- )
264
-
265
-
266
- # Rerank
267
- if reranker is not None:
268
- with suppress_output():
269
- docs_question_summary_reranked = rerank_docs(reranker,docs_question_dict["docs_summaries"],question)
270
- docs_question_fulltext_reranked = rerank_docs(reranker,docs_question_dict["docs_full"],question)
271
- docs_question_images_reranked = rerank_docs(reranker,docs_question_dict["docs_images"],question)
272
- if rerank_by_question:
273
- docs_question_summary_reranked = sorted(docs_question_summary_reranked, key=lambda x: x.metadata["reranking_score"], reverse=True)
274
- docs_question_fulltext_reranked = sorted(docs_question_fulltext_reranked, key=lambda x: x.metadata["reranking_score"], reverse=True)
275
- docs_question_images_reranked = sorted(docs_question_images_reranked, key=lambda x: x.metadata["reranking_score"], reverse=True)
276
- else:
277
- docs_question = docs_question_dict["docs_summaries"] + docs_question_dict["docs_full"]
278
- # Add a default reranking score
279
- for doc in docs_question:
280
- doc.metadata["reranking_score"] = doc.metadata["similarity_score"]
281
-
282
- docs_question = docs_question_summary_reranked + docs_question_fulltext_reranked
283
- docs_question = docs_question[:k_by_question]
284
- images_question = docs_question_images_reranked[:k_images]
285
-
286
- if reranker is not None and rerank_by_question:
287
- docs_question = sorted(docs_question, key=lambda x: x.metadata["reranking_score"], reverse=True)
288
-
289
- # Add sources used in the metadata
290
- docs_question = _add_sources_used_in_metadata(docs_question,sources,question,index)
291
- images_question = _add_sources_used_in_metadata(images_question,sources,question,index)
292
-
293
- # Add to the list of docs
294
- docs.extend(docs_question)
295
- related_content.extend(images_question)
296
- new_state = {"documents":docs, "related_contents": related_content,"remaining_questions":remaining_questions}
297
- return new_state
298
-
299
-
300
-
301
- def make_retriever_node(vectorstore,reranker,llm,rerank_by_question=True, k_final=15, k_before_reranking=100, k_summary=5):
302
- @chain
303
- async def retrieve_docs(state, config):
304
- state = await retrieve_documents(state,config, vectorstore,reranker,llm,rerank_by_question, k_final, k_before_reranking, k_summary)
305
- return state
306
-
307
- return retrieve_docs
308
-
309
-
310
-
 
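Note (illustrative, reconstructed from get_IPCC_relevant_documents above): the metadata filters passed to similarity_search_with_score separate SPM summaries from full reports. Example dictionaries, assuming the default sources:

```python
sources = ["IPCC", "IPBES", "IPOS"]

# Summaries: text chunks from Summary for Policymakers reports.
filters_summaries = {
    "source": {"$in": sources},
    "chunk_type": "text",
    "report_type": {"$in": ["SPM"]},
}

# Full reports: text chunks from everything except the SPMs.
filters_full = {
    "source": {"$in": sources},
    "chunk_type": "text",
    "report_type": {"$nin": ["SPM"]},
}

# e.g. vectorstore.similarity_search_with_score(query=question, filter=filters_summaries, k=k_summary)
```
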
climateqa/engine/chains/retrieve_papers.py DELETED
@@ -1,95 +0,0 @@
1
- from climateqa.engine.keywords import make_keywords_chain
2
- from climateqa.engine.llm import get_llm
3
- from climateqa.knowledge.openalex import OpenAlex
4
- from climateqa.engine.chains.answer_rag import make_rag_papers_chain
5
- from front.utils import make_html_papers
6
- from climateqa.engine.reranker import get_reranker
7
-
8
- oa = OpenAlex()
9
-
10
- llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
11
- reranker = get_reranker("nano")
12
-
13
-
14
- papers_cols_widths = {
15
- "id":100,
16
- "title":300,
17
- "doi":100,
18
- "publication_year":100,
19
- "abstract":500,
20
- "is_oa":50,
21
- }
22
-
23
- papers_cols = list(papers_cols_widths.keys())
24
- papers_cols_widths = list(papers_cols_widths.values())
25
-
26
-
27
-
28
- def generate_keywords(query):
29
- chain = make_keywords_chain(llm)
30
- keywords = chain.invoke(query)
31
- keywords = " AND ".join(keywords["keywords"])
32
- return keywords
33
-
34
-
35
- async def find_papers(query,after, relevant_content_sources, reranker= reranker):
36
- if "Papers (OpenAlex)" in relevant_content_sources:
37
- summary = ""
38
- keywords = generate_keywords(query)
39
- df_works = oa.search(keywords,after = after)
40
-
41
- print(f"Found {len(df_works)} papers")
42
-
43
- if not df_works.empty:
44
- df_works = df_works.dropna(subset=["abstract"])
45
- df_works = df_works[df_works["abstract"] != ""].reset_index(drop = True)
46
- df_works = oa.rerank(query,df_works,reranker)
47
- df_works = df_works.sort_values("rerank_score",ascending=False)
48
- docs_html = []
49
- for i in range(10):
50
- docs_html.append(make_html_papers(df_works, i))
51
- docs_html = "".join(docs_html)
52
- G = oa.make_network(df_works)
53
-
54
- height = "750px"
55
- network = oa.show_network(G,color_by = "rerank_score",notebook=False,height = height)
56
- network_html = network.generate_html()
57
-
58
- network_html = network_html.replace("'", "\"")
59
- css_to_inject = "<style>#mynetwork { border: none !important; } .card { border: none !important; }</style>"
60
- network_html = network_html + css_to_inject
61
-
62
-
63
- network_html = f"""<iframe style="width: 100%; height: {height};margin:0 auto" name="result" allow="midi; geolocation; microphone; camera;
64
- display-capture; encrypted-media;" sandbox="allow-modals allow-forms
65
- allow-scripts allow-same-origin allow-popups
66
- allow-top-navigation-by-user-activation allow-downloads" allowfullscreen=""
67
- allowpaymentrequest="" frameborder="0" srcdoc='{network_html}'></iframe>"""
68
-
69
-
70
- docs = df_works["content"].head(10).tolist()
71
-
72
- df_works = df_works.reset_index(drop = True).reset_index().rename(columns = {"index":"doc"})
73
- df_works["doc"] = df_works["doc"] + 1
74
- df_works = df_works[papers_cols]
75
-
76
- yield docs_html, network_html, summary
77
-
78
- chain = make_rag_papers_chain(llm)
79
- result = chain.astream_log({"question": query,"docs": docs,"language":"English"})
80
- path_answer = "/logs/StrOutputParser/streamed_output/-"
81
-
82
- async for op in result:
83
-
84
- op = op.ops[0]
85
-
86
- if op['path'] == path_answer: # reforulated question
87
- new_token = op['value'] # str
88
- summary += new_token
89
- else:
90
- continue
91
- yield docs_html, network_html, summary
92
- else :
93
- print("No papers found")
94
- else :
95
- yield "","", ""
 
climateqa/engine/chains/retriever.py DELETED
@@ -1,126 +0,0 @@
1
- # import sys
2
- # import os
3
- # from contextlib import contextmanager
4
-
5
- # from ..reranker import rerank_docs
6
- # from ...knowledge.retriever import ClimateQARetriever
7
-
8
-
9
-
10
-
11
- # def divide_into_parts(target, parts):
12
- # # Base value for each part
13
- # base = target // parts
14
- # # Remainder to distribute
15
- # remainder = target % parts
16
- # # List to hold the result
17
- # result = []
18
-
19
- # for i in range(parts):
20
- # if i < remainder:
21
- # # These parts get base value + 1
22
- # result.append(base + 1)
23
- # else:
24
- # # The rest get the base value
25
- # result.append(base)
26
-
27
- # return result
28
-
29
-
30
- # @contextmanager
31
- # def suppress_output():
32
- # # Open a null device
33
- # with open(os.devnull, 'w') as devnull:
34
- # # Store the original stdout and stderr
35
- # old_stdout = sys.stdout
36
- # old_stderr = sys.stderr
37
- # # Redirect stdout and stderr to the null device
38
- # sys.stdout = devnull
39
- # sys.stderr = devnull
40
- # try:
41
- # yield
42
- # finally:
43
- # # Restore stdout and stderr
44
- # sys.stdout = old_stdout
45
- # sys.stderr = old_stderr
46
-
47
-
48
-
49
- # def make_retriever_node(vectorstore,reranker,rerank_by_question=True, k_final=15, k_before_reranking=100, k_summary=5):
50
-
51
- # def retrieve_documents(state):
52
-
53
- # POSSIBLE_SOURCES = ["IPCC","IPBES","IPOS"] # ,"OpenAlex"]
54
- # questions = state["questions"]
55
-
56
- # # Use sources from the user input or from the LLM detection
57
- # if "sources_input" not in state or state["sources_input"] is None:
58
- # sources_input = ["auto"]
59
- # else:
60
- # sources_input = state["sources_input"]
61
- # auto_mode = "auto" in sources_input
62
-
63
- # # There are several options to get the final top k
64
- # # Option 1 - Get 100 documents by question and rerank by question
65
- # # Option 2 - Get 100/n documents by question and rerank the total
66
- # if rerank_by_question:
67
- # k_by_question = divide_into_parts(k_final,len(questions))
68
-
69
- # docs = []
70
-
71
- # for i,q in enumerate(questions):
72
-
73
- # sources = q["sources"]
74
- # question = q["question"]
75
-
76
- # # If auto mode, we use the sources detected by the LLM
77
- # if auto_mode:
78
- # sources = [x for x in sources if x in POSSIBLE_SOURCES]
79
-
80
- # # Otherwise, we use the config
81
- # else:
82
- # sources = sources_input
83
-
84
- # # Search the document store using the retriever
85
- # # Configure high top k for further reranking step
86
- # retriever = ClimateQARetriever(
87
- # vectorstore=vectorstore,
88
- # sources = sources,
89
- # # reports = ias_reports,
90
- # min_size = 200,
91
- # k_summary = k_summary,
92
- # k_total = k_before_reranking,
93
- # threshold = 0.5,
94
- # )
95
- # docs_question = retriever.get_relevant_documents(question)
96
-
97
- # # Rerank
98
- # if reranker is not None:
99
- # with suppress_output():
100
- # docs_question = rerank_docs(reranker,docs_question,question)
101
- # else:
102
- # # Add a default reranking score
103
- # for doc in docs_question:
104
- # doc.metadata["reranking_score"] = doc.metadata["similarity_score"]
105
-
106
- # # If rerank by question we select the top documents for each question
107
- # if rerank_by_question:
108
- # docs_question = docs_question[:k_by_question[i]]
109
-
110
- # # Add sources used in the metadata
111
- # for doc in docs_question:
112
- # doc.metadata["sources_used"] = sources
113
-
114
- # # Add to the list of docs
115
- # docs.extend(docs_question)
116
-
117
- # # Sorting the list in descending order by rerank_score
118
- # # Then select the top k
119
- # docs = sorted(docs, key=lambda x: x.metadata["reranking_score"], reverse=True)
120
- # docs = docs[:k_final]
121
-
122
- # new_state = {"documents":docs}
123
- # return new_state
124
-
125
- # return retrieve_documents
126
-
 
climateqa/engine/chains/sample_router.py DELETED
@@ -1,66 +0,0 @@
1
-
2
- # from typing import List
3
- # from typing import Literal
4
- # from langchain.prompts import ChatPromptTemplate
5
- # from langchain_core.utils.function_calling import convert_to_openai_function
6
- # from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
7
-
8
- # # https://livingdatalab.com/posts/2023-11-05-openai-function-calling-with-langchain.html
9
-
10
- # class Location(BaseModel):
11
- # country:str = Field(...,description="The country if directly mentioned or inferred from the location (cities, regions, adresses), ex: France, USA, ...")
12
- # location:str = Field(...,description="The specific place if mentioned (cities, regions, addresses), ex: Marseille, New York, Wisconsin, ...")
13
-
14
- # class QueryAnalysis(BaseModel):
15
- # """Analyzing the user query"""
16
-
17
- # language: str = Field(
18
- # description="Find the language of the query in full words (ex: French, English, Spanish, ...), defaults to English"
19
- # )
20
- # intent: str = Field(
21
- # enum=[
22
- # "Environmental impacts of AI",
23
- # "Geolocated info about climate change",
24
- # "Climate change",
25
- # "Biodiversity",
26
- # "Deep sea mining",
27
- # "Chitchat",
28
- # ],
29
- # description="""
30
- # Categorize the user query in one of the following category,
31
-
32
- # Examples:
33
- # - Geolocated info about climate change: "What will be the temperature in Marseille in 2050"
34
- # - Climate change: "What is radiative forcing", "How much will
35
- # """,
36
- # )
37
- # sources: List[Literal["IPCC", "IPBES", "IPOS"]] = Field(
38
- # ...,
39
- # description="""
40
- # Given a user question choose which documents would be most relevant for answering their question,
41
- # - IPCC is for questions about climate change, energy, impacts, and everything we can find the IPCC reports
42
- # - IPBES is for questions about biodiversity and nature
43
- # - IPOS is for questions about the ocean and deep sea mining
44
-
45
- # """,
46
- # )
47
- # date: str = Field(description="The date or period mentioned, ex: 2050, between 2020 and 2050")
48
- # location:Location
49
- # # query: str = Field(
50
- # # description = """
51
- # # Translate to english and reformulate the following user message to be a short standalone question, in the context of an educational discussion about climate change.
52
- # # The reformulated question will used in a search engine
53
- # # By default, assume that the user is asking information about the last century,
54
- # # Use the following examples
55
-
56
- # # ### Examples:
57
- # # La technologie nous sauvera-t-elle ? -> Can technology help humanity mitigate the effects of climate change?
58
- # # what are our reserves in fossil fuel? -> What are the current reserves of fossil fuels and how long will they last?
59
- # # what are the main causes of climate change? -> What are the main causes of climate change in the last century?
60
-
61
- # # Question in English:
62
- # # """
63
- # # )
64
-
65
- # openai_functions = [convert_to_openai_function(QueryAnalysis)]
66
- # llm2 = llm.bind(functions = openai_functions,function_call={"name":"QueryAnalysis"})
 
climateqa/engine/chains/set_defaults.py DELETED
@@ -1,13 +0,0 @@
1
- def set_defaults(state):
2
- print("---- Setting defaults ----")
3
-
4
- if not state["audience"] or state["audience"] is None:
5
- state.update({"audience": "experts"})
6
-
7
- sources_input = state["sources_input"] if "sources_input" in state else ["auto"]
8
- state.update({"sources_input": sources_input})
9
-
10
- # if not state["sources_input"] or state["sources_input"] is None:
11
- # state.update({"sources_input": ["auto"]})
12
-
13
- return state
 
climateqa/engine/chains/translation.py DELETED
@@ -1,42 +0,0 @@
1
-
2
- from langchain_core.pydantic_v1 import BaseModel, Field
3
- from typing import List
4
- from typing import Literal
5
- from langchain.prompts import ChatPromptTemplate
6
- from langchain_core.utils.function_calling import convert_to_openai_function
7
- from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
8
-
9
-
10
- class Translation(BaseModel):
11
- """Analyzing the user message input"""
12
-
13
- translation: str = Field(
14
- description="Translate the message input to English",
15
- )
16
-
17
-
18
- def make_translation_chain(llm):
19
-
20
- openai_functions = [convert_to_openai_function(Translation)]
21
- llm_with_functions = llm.bind(functions = openai_functions,function_call={"name":"Translation"})
22
-
23
- prompt = ChatPromptTemplate.from_messages([
24
- ("system", "You are a helpful assistant, you will translate the user input message to English using the function provided"),
25
- ("user", "input: {input}")
26
- ])
27
-
28
- chain = prompt | llm_with_functions | JsonOutputFunctionsParser()
29
- return chain
30
-
31
-
32
- def make_translation_node(llm):
33
- translation_chain = make_translation_chain(llm)
34
-
35
- def translate_query(state):
36
- print("---- Translate query ----")
37
-
38
- user_input = state["user_input"]
39
- translation = translation_chain.invoke({"input":user_input})
40
- return {"query":translation["translation"]}
41
-
42
- return translate_query
 
climateqa/engine/embeddings.py CHANGED
@@ -2,7 +2,7 @@
2
  from langchain_community.embeddings import HuggingFaceBgeEmbeddings
3
  from langchain_community.embeddings import HuggingFaceEmbeddings
4
 
5
- def get_embeddings_function(version = "v1.2",query_instruction = "Represent this sentence for searching relevant passages: "):
6
 
7
  if version == "v1.2":
8
 
@@ -10,12 +10,12 @@ def get_embeddings_function(version = "v1.2",query_instruction = "Represent this
10
  # Best embedding model at a reasonable size at the moment (2023-11-22)
11
 
12
  model_name = "BAAI/bge-base-en-v1.5"
13
- encode_kwargs = {'normalize_embeddings': True,"show_progress_bar":False} # set True to compute cosine similarity
14
  print("Loading embeddings model: ", model_name)
15
  embeddings_function = HuggingFaceBgeEmbeddings(
16
  model_name=model_name,
17
  encode_kwargs=encode_kwargs,
18
- query_instruction=query_instruction,
19
  )
20
 
21
  else:
@@ -23,6 +23,3 @@ def get_embeddings_function(version = "v1.2",query_instruction = "Represent this
23
  embeddings_function = HuggingFaceEmbeddings(model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1")
24
 
25
  return embeddings_function
26
-
27
-
28
-
 
2
  from langchain_community.embeddings import HuggingFaceBgeEmbeddings
3
  from langchain_community.embeddings import HuggingFaceEmbeddings
4
 
5
+ def get_embeddings_function(version = "v1.2"):
6
 
7
  if version == "v1.2":
8
 
 
10
  # Best embedding model at a reasonable size at the moment (2023-11-22)
11
 
12
  model_name = "BAAI/bge-base-en-v1.5"
13
+ encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity
14
  print("Loading embeddings model: ", model_name)
15
  embeddings_function = HuggingFaceBgeEmbeddings(
16
  model_name=model_name,
17
  encode_kwargs=encode_kwargs,
18
+ query_instruction="Represent this sentence for searching relevant passages: "
19
  )
20
 
21
  else:
 
23
  embeddings_function = HuggingFaceEmbeddings(model_name = "sentence-transformers/multi-qa-mpnet-base-dot-v1")
24
 
25
  return embeddings_function
 
 
 
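Note (illustrative usage, not part of the diff): after this change get_embeddings_function no longer accepts a query_instruction argument; the BGE query instruction is hard-coded. A minimal call, assuming the model can be downloaded locally:

```python
from climateqa.engine.embeddings import get_embeddings_function

embeddings_function = get_embeddings_function(version="v1.2")
vector = embeddings_function.embed_query("What are the impacts of climate change?")
print(len(vector))  # 768 dimensions for BAAI/bge-base-en-v1.5
```
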
climateqa/engine/graph.py DELETED
@@ -1,192 +0,0 @@
1
- import sys
2
- import os
3
- from contextlib import contextmanager
4
-
5
- from langchain.schema import Document
6
- from langgraph.graph import END, StateGraph
7
- from langchain_core.runnables.graph import CurveStyle, MermaidDrawMethod
8
-
9
- from typing_extensions import TypedDict
10
- from typing import List, Dict
11
-
12
- from IPython.display import display, HTML, Image
13
-
14
- from .chains.answer_chitchat import make_chitchat_node
15
- from .chains.answer_ai_impact import make_ai_impact_node
16
- from .chains.query_transformation import make_query_transform_node
17
- from .chains.translation import make_translation_node
18
- from .chains.intent_categorization import make_intent_categorization_node
19
- from .chains.retrieve_documents import make_retriever_node
20
- from .chains.answer_rag import make_rag_node
21
- from .chains.graph_retriever import make_graph_retriever_node
22
- from .chains.chitchat_categorization import make_chitchat_intent_categorization_node
23
- # from .chains.set_defaults import set_defaults
24
-
25
- class GraphState(TypedDict):
26
- """
27
- Represents the state of our graph.
28
- """
29
- user_input : str
30
- language : str
31
- intent : str
32
- search_graphs_chitchat : bool
33
- query: str
34
- remaining_questions : List[dict]
35
- n_questions : int
36
- answer: str
37
- audience: str = "experts"
38
- sources_input: List[str] = ["IPCC","IPBES"]
39
- relevant_content_sources: List[str] = ["Figures (IPCC/IPBES)"]
40
- sources_auto: bool = True
41
- min_year: int = 1960
42
- max_year: int = None
43
- documents: List[Document]
44
- related_contents : Dict[str,Document]
45
- recommended_content : List[Document]
46
- search_only : bool = False
47
- reports : List[str] = []
48
-
49
- def search(state): #TODO
50
- return state
51
-
52
- def answer_search(state):#TODO
53
- return state
54
-
55
- def route_intent(state):
56
- intent = state["intent"]
57
- if intent in ["chitchat","esg"]:
58
- return "answer_chitchat"
59
- # elif intent == "ai_impact":
60
- # return "answer_ai_impact"
61
- else:
62
- # Search route
63
- return "search"
64
-
65
- def chitchat_route_intent(state):
66
- intent = state["search_graphs_chitchat"]
67
- if intent is True:
68
- return "retrieve_graphs_chitchat"
69
- elif intent is False:
70
- return END
71
-
72
- def route_translation(state):
73
- if state["language"].lower() == "english":
74
- return "transform_query"
75
- else:
76
- return "translate_query"
77
-
78
- def route_based_on_relevant_docs(state,threshold_docs=0.2):
79
- docs = [x for x in state["documents"] if x.metadata["reranking_score"] > threshold_docs]
80
- if len(docs) > 0:
81
- return "answer_rag"
82
- else:
83
- return "answer_rag_no_docs"
84
-
85
- def route_retrieve_documents(state):
86
- if len(state["remaining_questions"]) == 0 and state["search_only"] :
87
- return END
88
- elif len(state["remaining_questions"]) > 0:
89
- return "retrieve_documents"
90
- else:
91
- return "answer_search"
92
-
93
- def make_id_dict(values):
94
- return {k:k for k in values}
95
-
96
- def make_graph_agent(llm, vectorstore_ipcc, vectorstore_graphs, reranker, threshold_docs=0.2):
97
-
98
- workflow = StateGraph(GraphState)
99
-
100
- # Define the node functions
101
- categorize_intent = make_intent_categorization_node(llm)
102
- transform_query = make_query_transform_node(llm)
103
- translate_query = make_translation_node(llm)
104
- answer_chitchat = make_chitchat_node(llm)
105
- answer_ai_impact = make_ai_impact_node(llm)
106
- retrieve_documents = make_retriever_node(vectorstore_ipcc, reranker, llm)
107
- retrieve_graphs = make_graph_retriever_node(vectorstore_graphs, reranker)
108
- answer_rag = make_rag_node(llm, with_docs=True)
109
- answer_rag_no_docs = make_rag_node(llm, with_docs=False)
110
- chitchat_categorize_intent = make_chitchat_intent_categorization_node(llm)
111
-
112
- # Define the nodes
113
- # workflow.add_node("set_defaults", set_defaults)
114
- workflow.add_node("categorize_intent", categorize_intent)
115
- workflow.add_node("search", search)
116
- workflow.add_node("answer_search", answer_search)
117
- workflow.add_node("transform_query", transform_query)
118
- workflow.add_node("translate_query", translate_query)
119
- workflow.add_node("answer_chitchat", answer_chitchat)
120
- workflow.add_node("chitchat_categorize_intent", chitchat_categorize_intent)
121
- workflow.add_node("retrieve_graphs", retrieve_graphs)
122
- workflow.add_node("retrieve_graphs_chitchat", retrieve_graphs)
123
- workflow.add_node("retrieve_documents", retrieve_documents)
124
- workflow.add_node("answer_rag", answer_rag)
125
- workflow.add_node("answer_rag_no_docs", answer_rag_no_docs)
126
-
127
- # Entry point
128
- workflow.set_entry_point("categorize_intent")
129
-
130
- # CONDITIONAL EDGES
131
- workflow.add_conditional_edges(
132
- "categorize_intent",
133
- route_intent,
134
- make_id_dict(["answer_chitchat","search"])
135
- )
136
-
137
- workflow.add_conditional_edges(
138
- "chitchat_categorize_intent",
139
- chitchat_route_intent,
140
- make_id_dict(["retrieve_graphs_chitchat", END])
141
- )
142
-
143
- workflow.add_conditional_edges(
144
- "search",
145
- route_translation,
146
- make_id_dict(["translate_query","transform_query"])
147
- )
148
- workflow.add_conditional_edges(
149
- "retrieve_documents",
150
- # lambda state : "retrieve_documents" if len(state["remaining_questions"]) > 0 else "answer_search",
151
- route_retrieve_documents,
152
- make_id_dict([END,"retrieve_documents","answer_search"])
153
- )
154
-
155
- workflow.add_conditional_edges(
156
- "answer_search",
157
- lambda x : route_based_on_relevant_docs(x,threshold_docs=threshold_docs),
158
- make_id_dict(["answer_rag","answer_rag_no_docs"])
159
- )
160
- workflow.add_conditional_edges(
161
- "transform_query",
162
- lambda state : "retrieve_graphs" if "Graphs (OurWorldInData)" in state["relevant_content_sources"] else END,
163
- make_id_dict(["retrieve_graphs", END])
164
- )
165
-
166
- # Define the edges
167
- workflow.add_edge("translate_query", "transform_query")
168
- workflow.add_edge("transform_query", "retrieve_documents")
169
-
170
- workflow.add_edge("retrieve_graphs", END)
171
- workflow.add_edge("answer_rag", END)
172
- workflow.add_edge("answer_rag_no_docs", END)
173
- workflow.add_edge("answer_chitchat", "chitchat_categorize_intent")
174
- workflow.add_edge("retrieve_graphs_chitchat", END)
175
-
176
-
177
- # Compile
178
- app = workflow.compile()
179
- return app
180
-
181
-
182
-
183
-
184
- def display_graph(app):
185
-
186
- display(
187
- Image(
188
- app.get_graph(xray = True).draw_mermaid_png(
189
- draw_method=MermaidDrawMethod.API,
190
- )
191
- )
192
- )
climateqa/engine/graph_retriever.py DELETED
@@ -1,88 +0,0 @@
1
- from langchain_core.retrievers import BaseRetriever
2
- from langchain_core.documents.base import Document
3
- from langchain_core.vectorstores import VectorStore
4
- from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
5
-
6
- from typing import List
7
-
8
- # class GraphRetriever(BaseRetriever):
9
- # vectorstore:VectorStore
10
- # sources:list = ["OWID"] # add OurWorldInData later # will need to be integrated with the other retriever
11
- # threshold:float = 0.5
12
- # k_total:int = 10
13
-
14
- # def _get_relevant_documents(
15
- # self, query: str, *, run_manager: CallbackManagerForRetrieverRun
16
- # ) -> List[Document]:
17
-
18
- # # Check if all elements in the list are IEA or OWID
19
- # assert isinstance(self.sources,list)
20
- # assert self.sources
21
- # assert any([x in ["OWID"] for x in self.sources])
22
-
23
- # # Prepare base search kwargs
24
- # filters = {}
25
-
26
- # filters["source"] = {"$in": self.sources}
27
-
28
- # docs = self.vectorstore.similarity_search_with_score(query=query, filter=filters, k=self.k_total)
29
-
30
- # # Filter if scores are below threshold
31
- # docs = [x for x in docs if x[1] > self.threshold]
32
-
33
- # # Remove duplicate documents
34
- # unique_docs = []
35
- # seen_docs = []
36
- # for i, doc in enumerate(docs):
37
- # if doc[0].page_content not in seen_docs:
38
- # unique_docs.append(doc)
39
- # seen_docs.append(doc[0].page_content)
40
-
41
- # # Add score to metadata
42
- # results = []
43
- # for i,(doc,score) in enumerate(unique_docs):
44
- # doc.metadata["similarity_score"] = score
45
- # doc.metadata["content"] = doc.page_content
46
- # results.append(doc)
47
-
48
- # return results
49
-
50
- async def retrieve_graphs(
51
- query: str,
52
- vectorstore:VectorStore,
53
- sources:list = ["OWID"], # add OurWorldInData later # will need to be integrated with the other retriever
54
- threshold:float = 0.5,
55
- k_total:int = 10,
56
- )-> List[Document]:
57
-
58
- # Check if all elements in the list are IEA or OWID
59
- assert isinstance(sources,list)
60
- assert sources
61
- assert any([x in ["OWID"] for x in sources])
62
-
63
- # Prepare base search kwargs
64
- filters = {}
65
-
66
- filters["source"] = {"$in": sources}
67
-
68
- docs = vectorstore.similarity_search_with_score(query=query, filter=filters, k=k_total)
69
-
70
- # Filter if scores are below threshold
71
- docs = [x for x in docs if x[1] > threshold]
72
-
73
- # Remove duplicate documents
74
- unique_docs = []
75
- seen_docs = []
76
- for i, doc in enumerate(docs):
77
- if doc[0].page_content not in seen_docs:
78
- unique_docs.append(doc)
79
- seen_docs.append(doc[0].page_content)
80
-
81
- # Add score to metadata
82
- results = []
83
- for i,(doc,score) in enumerate(unique_docs):
84
- doc.metadata["similarity_score"] = score
85
- doc.metadata["content"] = doc.page_content
86
- results.append(doc)
87
-
88
- return results
climateqa/engine/keywords.py CHANGED
@@ -11,12 +11,10 @@ class KeywordsOutput(BaseModel):
 
      keywords: list = Field(
          description="""
-         Generate 1 or 2 relevant keywords from the user query to ask a search engine for scientific research papers. Answer only with English keywords.
-         Do not use special characters or accents.
+         Generate 1 or 2 relevant keywords from the user query to ask a search engine for scientific research papers.
 
          Example:
          - "What is the impact of deep sea mining ?" -> ["deep sea mining"]
-         - "Quel est l'impact de l'exploitation minière en haute mer ?" -> ["deep sea mining"]
          - "How will El Nino be impacted by climate change" -> ["el nino"]
          - "Is climate change a hoax" -> [Climate change","hoax"]
          """
climateqa/engine/llm/__init__.py CHANGED
@@ -1,6 +1,5 @@
  from climateqa.engine.llm.openai import get_llm as get_openai_llm
  from climateqa.engine.llm.azure import get_llm as get_azure_llm
- from climateqa.engine.llm.ollama import get_llm as get_ollama_llm
 
 
  def get_llm(provider="openai",**kwargs):
@@ -9,8 +8,6 @@ def get_llm(provider="openai",**kwargs):
          return get_openai_llm(**kwargs)
      elif provider == "azure":
          return get_azure_llm(**kwargs)
-     elif provider == "ollama":
-         return get_ollama_llm(**kwargs)
      else:
          raise ValueError(f"Unknown provider: {provider}")
 
climateqa/engine/llm/ollama.py DELETED
@@ -1,6 +0,0 @@
-
-
- from langchain_community.llms import Ollama
-
- def get_llm(model="llama3", **kwargs):
-     return Ollama(model=model, **kwargs)
climateqa/engine/llm/openai.py CHANGED
@@ -7,7 +7,7 @@ try:
  except Exception:
      pass
 
- def get_llm(model="gpt-4o-mini",max_tokens=1024, temperature=0.0, streaming=True,timeout=30, **kwargs):
+ def get_llm(model="gpt-3.5-turbo-0125",max_tokens=1024, temperature=0.0, streaming=True,timeout=30, **kwargs):
 
      llm = ChatOpenAI(
          model=model,
climateqa/engine/{chains/prompts.py → prompts.py} RENAMED
@@ -56,7 +56,7 @@ Passages:
  {context}
 
  -----------------------
- Question: {query} - Explained to {audience}
+ Question: {question} - Explained to {audience}
  Answer in {language} with the passages citations:
  """
 
@@ -137,7 +137,7 @@ Guidelines:
  - If the question is not related to environmental issues, never never answer it. Say it's not your role.
  - Make paragraphs by starting new lines to make your answers more readable.
 
- Question: {query}
+ Question: {question}
  Answer in {language}:
  """
 
@@ -147,27 +147,4 @@ audience_prompts = {
      "children": "6 year old children that don't know anything about science and climate change and need metaphors to learn",
      "general": "the general public who know the basics in science and climate change and want to learn more about it without technical terms. Still use references to passages.",
      "experts": "expert and climate scientists that are not afraid of technical terms",
- }
-
-
- answer_prompt_graph_template = """
- Given the user question and a list of graphs which are related to the question, rank the graphs based on relevance to the user question. ALWAYS follow the guidelines given below.
-
- ### Guidelines ###
- - Keep all the graphs that are given to you.
- - NEVER modify the graph HTML embedding, the category or the source leave them exactly as they are given.
- - Return the ranked graphs as a list of dictionaries with keys 'embedding', 'category', and 'source'.
- - Return a valid JSON output.
-
- -----------------------
- User question:
- {query}
-
- Graphs and their HTML embedding:
- {recommended_content}
-
- -----------------------
- {format_instructions}
-
- Output the result as json with a key "graphs" containing a list of dictionaries of the relevant graphs with keys 'embedding', 'category', and 'source'. Do not modify the graph HTML embedding, the category or the source. Do not put any message or text before or after the JSON output.
- """
+ }
climateqa/engine/{chains/answer_rag.py → rag.py} RENAMED
@@ -2,14 +2,15 @@ from operator import itemgetter
2
 
3
  from langchain_core.prompts import ChatPromptTemplate
4
  from langchain_core.output_parsers import StrOutputParser
 
5
  from langchain_core.prompts.prompt import PromptTemplate
6
  from langchain_core.prompts.base import format_document
7
 
8
- from climateqa.engine.chains.prompts import answer_prompt_template,answer_prompt_without_docs_template,answer_prompt_images_template
9
- from climateqa.engine.chains.prompts import papers_prompt_template
10
- import time
11
- from ..utils import rename_chain, pass_values
12
-
13
 
14
  DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
15
 
@@ -39,51 +40,72 @@ def get_text_docs(x):
39
  def get_image_docs(x):
40
  return [doc for doc in x if doc.metadata["chunk_type"] == "image"]
41
 
42
- def make_rag_chain(llm):
 
 
 
43
  prompt = ChatPromptTemplate.from_template(answer_prompt_template)
44
- chain = ({
45
- "context":lambda x : _combine_documents(x["documents"]),
46
- "context_length":lambda x : print("CONTEXT LENGTH : " , len(_combine_documents(x["documents"]))),
47
- "query":itemgetter("query"),
48
- "language":itemgetter("language"),
49
- "audience":itemgetter("audience"),
50
- } | prompt | llm | StrOutputParser())
51
- return chain
52
 
53
- def make_rag_chain_without_docs(llm):
54
- prompt = ChatPromptTemplate.from_template(answer_prompt_without_docs_template)
55
- chain = prompt | llm | StrOutputParser()
56
- return chain
57
 
58
- def make_rag_node(llm,with_docs = True):
 
 
 
59
 
60
- if with_docs:
61
- rag_chain = make_rag_chain(llm)
62
- else:
63
- rag_chain = make_rag_chain_without_docs(llm)
64
 
65
- async def answer_rag(state,config):
66
- print("---- Answer RAG ----")
67
- start_time = time.time()
 
 
 
68
 
69
- answer = await rag_chain.ainvoke(state,config)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
- end_time = time.time()
72
- elapsed_time = end_time - start_time
73
- print("RAG elapsed time: ", elapsed_time)
74
- print("Answer size : ", len(answer))
75
- # print(f"\n\nAnswer:\n{answer}")
76
-
77
- return {"answer":answer}
78
 
79
- return answer_rag
80
 
 
 
 
81
 
 
82
 
83
 
84
  def make_rag_papers_chain(llm):
85
 
86
  prompt = ChatPromptTemplate.from_template(papers_prompt_template)
 
87
  input_documents = {
88
  "context":lambda x : _combine_documents(x["docs"]),
89
  **pass_values(["question","language"])
@@ -109,4 +131,4 @@ def make_illustration_chain(llm):
109
  }
110
 
111
  illustration_chain = input_description_images | prompt_with_images | llm | StrOutputParser()
112
- return illustration_chain
 
2
 
3
  from langchain_core.prompts import ChatPromptTemplate
4
  from langchain_core.output_parsers import StrOutputParser
5
+ from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableBranch
6
  from langchain_core.prompts.prompt import PromptTemplate
7
  from langchain_core.prompts.base import format_document
8
 
9
+ from climateqa.engine.reformulation import make_reformulation_chain
10
+ from climateqa.engine.prompts import answer_prompt_template,answer_prompt_without_docs_template,answer_prompt_images_template
11
+ from climateqa.engine.prompts import papers_prompt_template
12
+ from climateqa.engine.utils import pass_values, flatten_dict,prepare_chain,rename_chain
13
+ from climateqa.engine.keywords import make_keywords_chain
14
 
15
  DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
16
 
 
40
  def get_image_docs(x):
41
  return [doc for doc in x if doc.metadata["chunk_type"] == "image"]
42
 
43
+
44
+ def make_rag_chain(retriever,llm):
45
+
46
+ # Construct the prompt
47
  prompt = ChatPromptTemplate.from_template(answer_prompt_template)
48
+ prompt_without_docs = ChatPromptTemplate.from_template(answer_prompt_without_docs_template)
 
 
 
 
 
 
 
49
 
50
+ # ------- CHAIN 0 - Reformulation
51
+ reformulation = make_reformulation_chain(llm)
52
+ reformulation = prepare_chain(reformulation,"reformulation")
 
53
 
54
+ # ------- Find all keywords from the reformulated query
55
+ keywords = make_keywords_chain(llm)
56
+ keywords = {"keywords":itemgetter("question") | keywords}
57
+ keywords = prepare_chain(keywords,"keywords")
58
 
59
+ # ------- CHAIN 1
60
+ # Retrieved documents
61
+ find_documents = {"docs": itemgetter("question") | retriever} | RunnablePassthrough()
62
+ find_documents = prepare_chain(find_documents,"find_documents")
63
 
64
+ # ------- CHAIN 2
65
+ # Construct inputs for the llm
66
+ input_documents = {
67
+ "context":lambda x : _combine_documents(x["docs"]),
68
+ **pass_values(["question","audience","language","keywords"])
69
+ }
70
 
71
+ # ------- CHAIN 3
72
+ # Bot answer
73
+ llm_final = rename_chain(llm,"answer")
74
+
75
+ answer_with_docs = {
76
+ "answer": input_documents | prompt | llm_final | StrOutputParser(),
77
+ **pass_values(["question","audience","language","query","docs","keywords"]),
78
+ }
79
+
80
+ answer_without_docs = {
81
+ "answer": prompt_without_docs | llm_final | StrOutputParser(),
82
+ **pass_values(["question","audience","language","query","docs","keywords"]),
83
+ }
84
+
85
+ # def has_images(x):
86
+ # image_docs = [doc for doc in x["docs"] if doc.metadata["chunk_type"]=="image"]
87
+ # return len(image_docs) > 0
88
 
89
+ def has_docs(x):
90
+ return len(x["docs"]) > 0
91
+
92
+ answer = RunnableBranch(
93
+ (lambda x: has_docs(x), answer_with_docs),
94
+ answer_without_docs,
95
+ )
96
 
 
97
 
98
+ # ------- FINAL CHAIN
99
+ # Build the final chain
100
+ rag_chain = reformulation | keywords | find_documents | answer
101
 
102
+ return rag_chain
103
 
104
 
105
  def make_rag_papers_chain(llm):
106
 
107
  prompt = ChatPromptTemplate.from_template(papers_prompt_template)
108
+
109
  input_documents = {
110
  "context":lambda x : _combine_documents(x["docs"]),
111
  **pass_values(["question","language"])
 
131
  }
132
 
133
  illustration_chain = input_description_images | prompt_with_images | llm | StrOutputParser()
134
+ return illustration_chain
climateqa/engine/{chains/reformulation.py → reformulation.py} RENAMED
@@ -3,7 +3,7 @@ from langchain.output_parsers.structured import StructuredOutputParser, Response
  from langchain_core.prompts import PromptTemplate
  from langchain_core.runnables import RunnablePassthrough, RunnableLambda, RunnableBranch
 
- from climateqa.engine.chains.prompts import reformulation_prompt_template
+ from climateqa.engine.prompts import reformulation_prompt_template
  from climateqa.engine.utils import pass_values, flatten_dict
 
 
climateqa/engine/reranker.py DELETED
@@ -1,50 +0,0 @@
1
- import os
2
- from dotenv import load_dotenv
3
- from scipy.special import expit, logit
4
- from rerankers import Reranker
5
- from sentence_transformers import CrossEncoder
6
-
7
- load_dotenv()
8
-
9
- def get_reranker(model = "nano", cohere_api_key = None):
10
-
11
- assert model in ["nano","tiny","small","large", "jina"]
12
-
13
- if model == "nano":
14
- reranker = Reranker('ms-marco-TinyBERT-L-2-v2', model_type='flashrank')
15
- elif model == "tiny":
16
- reranker = Reranker('ms-marco-MiniLM-L-12-v2', model_type='flashrank')
17
- elif model == "small":
18
- reranker = Reranker("mixedbread-ai/mxbai-rerank-xsmall-v1", model_type='cross-encoder')
19
- elif model == "large":
20
- if cohere_api_key is None:
21
- cohere_api_key = os.environ["COHERE_API_KEY"]
22
- reranker = Reranker("cohere", lang='en', api_key = cohere_api_key)
23
- elif model == "jina":
24
- # Reached token quota so does not work
25
- reranker = Reranker("jina-reranker-v2-base-multilingual", api_key = os.getenv("JINA_RERANKER_API_KEY"))
26
- # does not work without a GPU? and anyway it returns a different structure, so the retriever node code would need to change
27
- # reranker = CrossEncoder("jinaai/jina-reranker-v2-base-multilingual", automodel_args={"torch_dtype": "auto"}, trust_remote_code=True,)
28
- return reranker
29
-
30
-
31
-
32
- def rerank_docs(reranker,docs,query):
33
- if docs == []:
34
- return []
35
-
36
- # Get a list of texts from langchain docs
37
- input_docs = [x.page_content for x in docs]
38
-
39
- # Rerank using rerankers library
40
- results = reranker.rank(query=query, docs=input_docs)
41
-
42
- # Prepare langchain list of docs
43
- docs_reranked = []
44
- for result in results.results:
45
- doc_id = result.document.doc_id
46
- doc = docs[doc_id]
47
- doc.metadata["reranking_score"] = result.score
48
- doc.metadata["query_used_for_retrieval"] = query
49
- docs_reranked.append(doc)
50
- return docs_reranked
climateqa/engine/retriever.py ADDED
@@ -0,0 +1,163 @@
1
+ # https://github.com/langchain-ai/langchain/issues/8623
2
+
3
+ import pandas as pd
4
+
5
+ from langchain_core.retrievers import BaseRetriever
6
+ from langchain_core.vectorstores import VectorStoreRetriever
7
+ from langchain_core.documents.base import Document
8
+ from langchain_core.vectorstores import VectorStore
9
+ from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
10
+
11
+ from typing import List
12
+ from pydantic import Field
13
+
14
+ class ClimateQARetriever(BaseRetriever):
15
+ vectorstore:VectorStore
16
+ sources:list = ["IPCC","IPBES","IPOS"]
17
+ reports:list = []
18
+ threshold:float = 0.6
19
+ k_summary:int = 3
20
+ k_total:int = 10
21
+ namespace:str = "vectors",
22
+ min_size:int = 200,
23
+
24
+
25
+ def _get_relevant_documents(
26
+ self, query: str, *, run_manager: CallbackManagerForRetrieverRun
27
+ ) -> List[Document]:
28
+
29
+ # Check if all elements in the list are either IPCC or IPBES
30
+ assert isinstance(self.sources,list)
31
+ assert all([x in ["IPCC","IPBES","IPOS"] for x in self.sources])
32
+ assert self.k_total > self.k_summary, "k_total should be greater than k_summary"
33
+
34
+ # Prepare base search kwargs
35
+ filters = {}
36
+
37
+ if len(self.reports) > 0:
38
+ filters["short_name"] = {"$in":self.reports}
39
+ else:
40
+ filters["source"] = { "$in":self.sources}
41
+
42
+ # Search for k_summary documents in the summaries dataset
43
+ filters_summaries = {
44
+ **filters,
45
+ "report_type": { "$in":["SPM"]},
46
+ }
47
+
48
+ docs_summaries = self.vectorstore.similarity_search_with_score(query=query,filter = filters_summaries,k = self.k_summary)
49
+ docs_summaries = [x for x in docs_summaries if x[1] > self.threshold]
50
+
51
+ # Search for k_total - k_summary documents in the full reports dataset
52
+ filters_full = {
53
+ **filters,
54
+ "report_type": { "$nin":["SPM"]},
55
+ }
56
+ k_full = self.k_total - len(docs_summaries)
57
+ docs_full = self.vectorstore.similarity_search_with_score(query=query,filter = filters_full,k = k_full)
58
+
59
+ # Concatenate documents
60
+ docs = docs_summaries + docs_full
61
+
62
+ # Filter if scores are below threshold
63
+ docs = [x for x in docs if len(x[0].page_content) > self.min_size]
64
+ # docs = [x for x in docs if x[1] > self.threshold]
65
+
66
+ # Add score to metadata
67
+ results = []
68
+ for i,(doc,score) in enumerate(docs):
69
+ doc.metadata["similarity_score"] = score
70
+ doc.metadata["content"] = doc.page_content
71
+ doc.metadata["page_number"] = int(doc.metadata["page_number"]) + 1
72
+ # doc.page_content = f"""Doc {i+1} - {doc.metadata['short_name']}: {doc.page_content}"""
73
+ results.append(doc)
74
+
75
+ # Sort by score
76
+ # results = sorted(results,key = lambda x : x.metadata["similarity_score"],reverse = True)
77
+
78
+ return results
79
+
80
+
81
+
82
+
83
+ # def filter_summaries(df,k_summary = 3,k_total = 10):
84
+ # # assert source in ["IPCC","IPBES","ALL"], "source arg should be in (IPCC,IPBES,ALL)"
85
+
86
+ # # # Filter by source
87
+ # # if source == "IPCC":
88
+ # # df = df.loc[df["source"]=="IPCC"]
89
+ # # elif source == "IPBES":
90
+ # # df = df.loc[df["source"]=="IPBES"]
91
+ # # else:
92
+ # # pass
93
+
94
+ # # Separate summaries and full reports
95
+ # df_summaries = df.loc[df["report_type"].isin(["SPM","TS"])]
96
+ # df_full = df.loc[~df["report_type"].isin(["SPM","TS"])]
97
+
98
+ # # Find passages from summaries dataset
99
+ # passages_summaries = df_summaries.head(k_summary)
100
+
101
+ # # Find passages from full reports dataset
102
+ # passages_fullreports = df_full.head(k_total - len(passages_summaries))
103
+
104
+ # # Concatenate passages
105
+ # passages = pd.concat([passages_summaries,passages_fullreports],axis = 0,ignore_index = True)
106
+ # return passages
107
+
108
+
109
+
110
+
111
+ # def retrieve_with_summaries(query,retriever,k_summary = 3,k_total = 10,sources = ["IPCC","IPBES"],max_k = 100,threshold = 0.555,as_dict = True,min_length = 300):
112
+ # assert max_k > k_total
113
+
114
+ # validated_sources = ["IPCC","IPBES"]
115
+ # sources = [x for x in sources if x in validated_sources]
116
+ # filters = {
117
+ # "source": { "$in": sources },
118
+ # }
119
+ # print(filters)
120
+
121
+ # # Retrieve documents
122
+ # docs = retriever.retrieve(query,top_k = max_k,filters = filters)
123
+
124
+ # # Filter by score
125
+ # docs = [{**x.meta,"score":x.score,"content":x.content} for x in docs if x.score > threshold]
126
+
127
+ # if len(docs) == 0:
128
+ # return []
129
+ # res = pd.DataFrame(docs)
130
+ # passages_df = filter_summaries(res,k_summary,k_total)
131
+ # if as_dict:
132
+ # contents = passages_df["content"].tolist()
133
+ # meta = passages_df.drop(columns = ["content"]).to_dict(orient = "records")
134
+ # passages = []
135
+ # for i in range(len(contents)):
136
+ # passages.append({"content":contents[i],"meta":meta[i]})
137
+ # return passages
138
+ # else:
139
+ # return passages_df
140
+
141
+
142
+
143
+ # def retrieve(query,sources = ["IPCC"],threshold = 0.555,k = 10):
144
+
145
+
146
+ # print("hellooooo")
147
+
148
+ # # Reformulate queries
149
+ # reformulated_query,language = reformulate(query)
150
+
151
+ # print(reformulated_query)
152
+
153
+ # # Retrieve documents
154
+ # passages = retrieve_with_summaries(reformulated_query,retriever,k_total = k,k_summary = 3,as_dict = True,sources = sources,threshold = threshold)
155
+ # response = {
156
+ # "query":query,
157
+ # "reformulated_query":reformulated_query,
158
+ # "language":language,
159
+ # "sources":passages,
160
+ # "prompts":{"init_prompt":init_prompt,"sources_prompt":sources_prompt},
161
+ # }
162
+ # return response
163
+
climateqa/engine/utils.py CHANGED
@@ -1,15 +1,8 @@
  from operator import itemgetter
  from typing import Any, Dict, Iterable, Tuple
- import tiktoken
  from langchain_core.runnables import RunnablePassthrough
 
 
- def num_tokens_from_string(string: str, encoding_name: str = "cl100k_base") -> int:
-     encoding = tiktoken.get_encoding(encoding_name)
-     num_tokens = len(encoding.encode(string))
-     return num_tokens
-
-
  def pass_values(x):
      if not isinstance(x, list):
          x = [x]
@@ -74,13 +67,3 @@ def flatten_dict(
      """
      flat_dict = {k: v for k, v in _flatten_dict(nested_dict, parent_key, sep)}
      return flat_dict
-
-
-
- async def log_event(info,name,config):
-     """Helper function that will run a dummy chain with the given info
-     The astream_event function will catch this chain and stream the dict info to the logger
-     """
-
-     chain = RunnablePassthrough().with_config(run_name=name)
-     _ = await chain.ainvoke(info,config)
climateqa/engine/vectorstore.py CHANGED
@@ -13,9 +13,7 @@
      pass
 
 
-
-
- def get_pinecone_vectorstore(embeddings,text_key = "content", index_name = os.getenv("PINECONE_API_INDEX")):
+ def get_pinecone_vectorstore(embeddings,text_key = "content"):
 
      # # initialize pinecone
      # pinecone.init(
@@ -29,7 +27,7 @@ def get_pinecone_vectorstore(embeddings,text_key = "content", index_name = os.ge
      # return vectorstore
 
      pc = Pinecone(api_key=os.getenv("PINECONE_API_KEY"))
-     index = pc.Index(index_name)
+     index = pc.Index(os.getenv("PINECONE_API_INDEX"))
 
      vectorstore = PineconeVectorstore(
          index, embeddings, text_key,
climateqa/event_handler.py DELETED
@@ -1,123 +0,0 @@
1
- from langchain_core.runnables.schema import StreamEvent
2
- from gradio import ChatMessage
3
- from climateqa.engine.chains.prompts import audience_prompts
4
- from front.utils import make_html_source,parse_output_llm_with_sources,serialize_docs,make_toolbox,generate_html_graphs
5
- import numpy as np
6
-
7
- def init_audience(audience :str) -> str:
8
- if audience == "Children":
9
- audience_prompt = audience_prompts["children"]
10
- elif audience == "General public":
11
- audience_prompt = audience_prompts["general"]
12
- elif audience == "Experts":
13
- audience_prompt = audience_prompts["experts"]
14
- else:
15
- audience_prompt = audience_prompts["experts"]
16
- return audience_prompt
17
-
18
- def handle_retrieved_documents(event: StreamEvent, history : list[ChatMessage], used_documents : list[str]) -> tuple[str, list[ChatMessage], list[str]]:
19
- """
20
- Handles the retrieved documents and returns the HTML representation of the documents
21
-
22
- Args:
23
- event (StreamEvent): The event containing the retrieved documents
24
- history (list[ChatMessage]): The current message history
25
- used_documents (list[str]): The list of used documents
26
-
27
- Returns:
28
- tuple[str, list[ChatMessage], list[str]]: The updated HTML representation of the documents, the updated message history and the updated list of used documents
29
- """
30
- try:
31
- docs = event["data"]["output"]["documents"]
32
- docs_html = []
33
- textual_docs = [d for d in docs if d.metadata["chunk_type"] == "text"]
34
- for i, d in enumerate(textual_docs, 1):
35
- if d.metadata["chunk_type"] == "text":
36
- docs_html.append(make_html_source(d, i))
37
-
38
- used_documents = used_documents + [f"{d.metadata['short_name']} - {d.metadata['name']}" for d in docs]
39
- if used_documents!=[]:
40
- history[-1].content = "Adding sources :\n\n - " + "\n - ".join(np.unique(used_documents))
41
-
42
- docs_html = "".join(docs_html)
43
-
44
- related_contents = event["data"]["output"]["related_contents"]
45
-
46
- except Exception as e:
47
- print(f"Error getting documents: {e}")
48
- print(event)
49
- return docs, docs_html, history, used_documents, related_contents
50
-
51
- def stream_answer(history: list[ChatMessage], event : StreamEvent, start_streaming : bool, answer_message_content : str)-> tuple[list[ChatMessage], bool, str]:
52
- """
53
- Handles the streaming of the answer and updates the history with the new message content
54
-
55
- Args:
56
- history (list[ChatMessage]): The current message history
57
- event (StreamEvent): The event containing the streamed answer
58
- start_streaming (bool): A flag indicating if the streaming has started
59
- new_message_content (str): The content of the new message
60
-
61
- Returns:
62
- tuple[list[ChatMessage], bool, str]: The updated history, the updated streaming flag and the updated message content
63
- """
64
- if start_streaming == False:
65
- start_streaming = True
66
- history.append(ChatMessage(role="assistant", content = ""))
67
- answer_message_content += event["data"]["chunk"].content
68
- answer_message_content = parse_output_llm_with_sources(answer_message_content)
69
- history[-1] = ChatMessage(role="assistant", content = answer_message_content)
70
- # history.append(ChatMessage(role="assistant", content = new_message_content))
71
- return history, start_streaming, answer_message_content
72
-
73
- def handle_retrieved_owid_graphs(event :StreamEvent, graphs_html: str) -> str:
74
- """
75
- Handles the retrieved OWID graphs and returns the HTML representation of the graphs
76
-
77
- Args:
78
- event (StreamEvent): The event containing the retrieved graphs
79
- graphs_html (str): The current HTML representation of the graphs
80
-
81
- Returns:
82
- str: The updated HTML representation
83
- """
84
- try:
85
- recommended_content = event["data"]["output"]["recommended_content"]
86
-
87
- unique_graphs = []
88
- seen_embeddings = set()
89
-
90
- for x in recommended_content:
91
- embedding = x.metadata["returned_content"]
92
-
93
- # Check if the embedding has already been seen
94
- if embedding not in seen_embeddings:
95
- unique_graphs.append({
96
- "embedding": embedding,
97
- "metadata": {
98
- "source": x.metadata["source"],
99
- "category": x.metadata["category"]
100
- }
101
- })
102
- # Add the embedding to the seen set
103
- seen_embeddings.add(embedding)
104
-
105
-
106
- categories = {}
107
- for graph in unique_graphs:
108
- category = graph['metadata']['category']
109
- if category not in categories:
110
- categories[category] = []
111
- categories[category].append(graph['embedding'])
112
-
113
-
114
- for category, embeddings in categories.items():
115
- graphs_html += f"<h3>{category}</h3>"
116
- for embedding in embeddings:
117
- graphs_html += f"<div>{embedding}</div>"
118
-
119
-
120
- except Exception as e:
121
- print(f"Error getting graphs: {e}")
122
-
123
- return graphs_html
climateqa/knowledge/__init__.py DELETED
File without changes
climateqa/knowledge/retriever.py DELETED
@@ -1,102 +0,0 @@
1
- # # https://github.com/langchain-ai/langchain/issues/8623
2
-
3
- # import pandas as pd
4
-
5
- # from langchain_core.retrievers import BaseRetriever
6
- # from langchain_core.vectorstores import VectorStoreRetriever
7
- # from langchain_core.documents.base import Document
8
- # from langchain_core.vectorstores import VectorStore
9
- # from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
10
-
11
- # from typing import List
12
- # from pydantic import Field
13
-
14
- # def _add_metadata_and_score(docs: List) -> Document:
15
- # # Add score to metadata
16
- # docs_with_metadata = []
17
- # for i,(doc,score) in enumerate(docs):
18
- # doc.page_content = doc.page_content.replace("\r\n"," ")
19
- # doc.metadata["similarity_score"] = score
20
- # doc.metadata["content"] = doc.page_content
21
- # doc.metadata["page_number"] = int(doc.metadata["page_number"]) + 1
22
- # # doc.page_content = f"""Doc {i+1} - {doc.metadata['short_name']}: {doc.page_content}"""
23
- # docs_with_metadata.append(doc)
24
- # return docs_with_metadata
25
-
26
- # class ClimateQARetriever(BaseRetriever):
27
- # vectorstore:VectorStore
28
- # sources:list = ["IPCC","IPBES","IPOS"]
29
- # reports:list = []
30
- # threshold:float = 0.6
31
- # k_summary:int = 3
32
- # k_total:int = 10
33
- # namespace:str = "vectors",
34
- # min_size:int = 200,
35
-
36
-
37
-
38
- # def _get_relevant_documents(
39
- # self, query: str, *, run_manager: CallbackManagerForRetrieverRun
40
- # ) -> List[Document]:
41
-
42
- # # Check if all elements in the list are either IPCC or IPBES
43
- # assert isinstance(self.sources,list)
44
- # assert self.sources
45
- # assert all([x in ["IPCC","IPBES","IPOS"] for x in self.sources])
46
- # assert self.k_total > self.k_summary, "k_total should be greater than k_summary"
47
-
48
- # # Prepare base search kwargs
49
- # filters = {}
50
-
51
- # if len(self.reports) > 0:
52
- # filters["short_name"] = {"$in":self.reports}
53
- # else:
54
- # filters["source"] = { "$in":self.sources}
55
-
56
- # # Search for k_summary documents in the summaries dataset
57
- # filters_summaries = {
58
- # **filters,
59
- # "chunk_type":"text",
60
- # "report_type": { "$in":["SPM"]},
61
- # }
62
-
63
- # docs_summaries = self.vectorstore.similarity_search_with_score(query=query,filter = filters_summaries,k = self.k_summary)
64
- # docs_summaries = [x for x in docs_summaries if x[1] > self.threshold]
65
- # # docs_summaries = []
66
-
67
- # # Search for k_total - k_summary documents in the full reports dataset
68
- # filters_full = {
69
- # **filters,
70
- # "chunk_type":"text",
71
- # "report_type": { "$nin":["SPM"]},
72
- # }
73
- # k_full = self.k_total - len(docs_summaries)
74
- # docs_full = self.vectorstore.similarity_search_with_score(query=query,filter = filters_full,k = k_full)
75
-
76
- # # Images
77
- # filters_image = {
78
- # **filters,
79
- # "chunk_type":"image"
80
- # }
81
- # docs_images = self.vectorstore.similarity_search_with_score(query=query,filter = filters_image,k = k_full)
82
-
83
- # # docs_images = []
84
-
85
- # # Concatenate documents
86
- # # docs = docs_summaries + docs_full + docs_images
87
-
88
- # # Filter if scores are below threshold
89
- # # docs = [x for x in docs if x[1] > self.threshold]
90
-
91
- # docs_summaries, docs_full, docs_images = _add_metadata_and_score(docs_summaries), _add_metadata_and_score(docs_full), _add_metadata_and_score(docs_images)
92
-
93
- # # Filter if length are below threshold
94
- # docs_summaries = [x for x in docs_summaries if len(x.page_content) > self.min_size]
95
- # docs_full = [x for x in docs_full if len(x.page_content) > self.min_size]
96
-
97
-
98
- # return {
99
- # "docs_summaries" : docs_summaries,
100
- # "docs_full" : docs_full,
101
- # "docs_images" : docs_images,
102
- # }
climateqa/papers/__init__.py ADDED
@@ -0,0 +1,43 @@
+ import pandas as pd
+
+ from pyalex import Works, Authors, Sources, Institutions, Concepts, Publishers, Funders
+ import pyalex
+
+ pyalex.config.email = "theo.alvesdacosta@ekimetrics.com"
+
+ class OpenAlex():
+     def __init__(self):
+         pass
+
+
+
+     def search(self,keywords,n_results = 100,after = None,before = None):
+         works = Works().search(keywords).get()
+
+         for page in works.paginate(per_page=n_results):
+             break
+
+         df_works = pd.DataFrame(page)
+
+         return works
+
+
+     def make_network(self):
+         pass
+
+
+     def get_abstract_from_inverted_index(self,index):
+
+         # Determine the maximum index to know the length of the reconstructed array
+         max_index = max([max(positions) for positions in index.values()])
+
+         # Initialize a list with placeholders for all positions
+         reconstructed = [''] * (max_index + 1)
+
+         # Iterate through the inverted index and place each token at its respective position(s)
+         for token, positions in index.items():
+             for position in positions:
+                 reconstructed[position] = token
+
+         # Join the tokens to form the reconstructed sentence(s)
+         return ' '.join(reconstructed)
climateqa/{knowledge → papers}/openalex.py RENAMED
@@ -3,32 +3,18 @@ import networkx as nx
3
  import matplotlib.pyplot as plt
4
  from pyvis.network import Network
5
 
6
- from langchain_core.retrievers import BaseRetriever
7
- from langchain_core.vectorstores import VectorStoreRetriever
8
- from langchain_core.documents.base import Document
9
- from langchain_core.vectorstores import VectorStore
10
- from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
11
-
12
- from ..engine.utils import num_tokens_from_string
13
-
14
- from typing import List
15
- from pydantic import Field
16
-
17
  from pyalex import Works, Authors, Sources, Institutions, Concepts, Publishers, Funders
18
  import pyalex
19
 
20
  pyalex.config.email = "theo.alvesdacosta@ekimetrics.com"
21
 
22
-
23
- def replace_nan_with_empty_dict(x):
24
- return x if pd.notna(x) else {}
25
-
26
  class OpenAlex():
27
  def __init__(self):
28
  pass
29
 
30
 
31
- def search(self,keywords:str,n_results = 100,after = None,before = None):
 
32
 
33
  if isinstance(keywords,str):
34
  works = Works().search(keywords)
@@ -41,36 +27,29 @@ class OpenAlex():
41
  break
42
 
43
  df_works = pd.DataFrame(page)
44
-
45
- if df_works.empty:
46
- return df_works
47
-
48
- df_works = df_works.dropna(subset = ["title"])
49
- df_works["primary_location"] = df_works["primary_location"].map(replace_nan_with_empty_dict)
50
- df_works["abstract"] = df_works["abstract_inverted_index"].apply(lambda x: self.get_abstract_from_inverted_index(x)).fillna("")
51
  df_works["is_oa"] = df_works["open_access"].map(lambda x : x.get("is_oa",False))
52
  df_works["pdf_url"] = df_works["primary_location"].map(lambda x : x.get("pdf_url",None))
53
- df_works["url"] = df_works["id"]
54
- df_works["content"] = (df_works["title"] + "\n" + df_works["abstract"]).map(lambda x : x.strip())
55
- df_works["num_tokens"] = df_works["content"].map(lambda x : num_tokens_from_string(x))
56
-
57
- df_works = df_works.drop(columns = ["abstract_inverted_index"])
58
- df_works["display_name"] = df_works["primary_location"].apply(lambda x :x["source"] if type(x) == dict and 'source' in x else "").apply(lambda x : x["display_name"] if type(x) == dict and "display_name" in x else "")
59
- df_works["subtitle"] = df_works["title"].astype(str) + " - " + df_works["display_name"].astype(str) + " - " + df_works["publication_year"].astype(str)
60
 
61
- return df_works
62
  else:
63
- raise Exception("Keywords must be a string")
 
 
 
 
 
64
 
65
 
66
  def rerank(self,query,df,reranker):
67
 
68
  scores = reranker.rank(
69
  query,
70
- df["content"].tolist()
 
71
  )
72
- scores = sorted(scores.results, key = lambda x : x.document.doc_id)
73
- scores = [x.score for x in scores]
74
  df["rerank_score"] = scores
75
  return df
76
 
@@ -160,36 +139,4 @@ class OpenAlex():
160
  reconstructed[position] = token
161
 
162
  # Join the tokens to form the reconstructed sentence(s)
163
- return ' '.join(reconstructed)
164
-
165
-
166
-
167
- class OpenAlexRetriever(BaseRetriever):
168
- min_year:int = 1960
169
- max_year:int = None
170
- k:int = 100
171
-
172
- def _get_relevant_documents(
173
- self, query: str, *, run_manager: CallbackManagerForRetrieverRun
174
- ) -> List[Document]:
175
-
176
- openalex = OpenAlex()
177
-
178
- # Search for documents
179
- df_docs = openalex.search(query,n_results=self.k,after = self.min_year,before = self.max_year)
180
-
181
- docs = []
182
- for i,row in df_docs.iterrows():
183
- num_tokens = row["num_tokens"]
184
-
185
- if num_tokens < 50 or num_tokens > 1000:
186
- continue
187
-
188
- doc = Document(
189
- page_content = row["content"],
190
- metadata = row.to_dict()
191
- )
192
- docs.append(doc)
193
- return docs
194
-
195
-
 
3
  import matplotlib.pyplot as plt
4
  from pyvis.network import Network
5
 
 
 
 
 
 
 
 
 
 
 
 
6
  from pyalex import Works, Authors, Sources, Institutions, Concepts, Publishers, Funders
7
  import pyalex
8
 
9
  pyalex.config.email = "theo.alvesdacosta@ekimetrics.com"
10
 
 
 
 
 
11
  class OpenAlex():
12
  def __init__(self):
13
  pass
14
 
15
 
16
+
17
+ def search(self,keywords,n_results = 100,after = None,before = None):
18
 
19
  if isinstance(keywords,str):
20
  works = Works().search(keywords)
 
27
  break
28
 
29
  df_works = pd.DataFrame(page)
30
+ df_works["abstract"] = df_works["abstract_inverted_index"].apply(lambda x: self.get_abstract_from_inverted_index(x))
 
 
 
 
 
 
31
  df_works["is_oa"] = df_works["open_access"].map(lambda x : x.get("is_oa",False))
32
  df_works["pdf_url"] = df_works["primary_location"].map(lambda x : x.get("pdf_url",None))
33
+ df_works["content"] = df_works["title"] + "\n" + df_works["abstract"]
 
 
 
 
 
 
34
 
 
35
  else:
36
+ df_works = []
37
+ for keyword in keywords:
38
+ df_keyword = self.search(keyword,n_results = n_results,after = after,before = before)
39
+ df_works.append(df_keyword)
40
+ df_works = pd.concat(df_works,ignore_index=True,axis = 0)
41
+ return df_works
42
 
43
 
44
  def rerank(self,query,df,reranker):
45
 
46
  scores = reranker.rank(
47
  query,
48
+ df["content"].tolist(),
49
+ top_k = len(df),
50
  )
51
+ scores.sort(key = lambda x : x["corpus_id"])
52
+ scores = [x["score"] for x in scores]
53
  df["rerank_score"] = scores
54
  return df
55
 
 
139
  reconstructed[position] = token
140
 
141
  # Join the tokens to form the reconstructed sentence(s)
142
+ return ' '.join(reconstructed)
climateqa/utils.py CHANGED
@@ -20,16 +20,3 @@ def get_image_from_azure_blob_storage(path):
      file_object = get_file_from_azure_blob_storage(path)
      image = Image.open(file_object)
      return image
-
- def remove_duplicates_keep_highest_score(documents):
-     unique_docs = {}
-
-     for doc in documents:
-         doc_id = doc.metadata.get('doc_id')
-         if doc_id in unique_docs:
-             if doc.metadata['reranking_score'] > unique_docs[doc_id].metadata['reranking_score']:
-                 unique_docs[doc_id] = doc
-         else:
-             unique_docs[doc_id] = doc
-
-     return list(unique_docs.values())
front/__init__.py DELETED
File without changes
front/callbacks.py DELETED
File without changes
front/utils.py DELETED
@@ -1,341 +0,0 @@
1
-
2
- import re
3
- from collections import defaultdict
4
- from climateqa.utils import get_image_from_azure_blob_storage
5
- from climateqa.engine.chains.prompts import audience_prompts
6
- from PIL import Image
7
- from io import BytesIO
8
- import base64
9
-
10
-
11
- def make_pairs(lst:list)->list:
12
- """from a list of even lenght, make tupple pairs"""
13
- return [(lst[i], lst[i + 1]) for i in range(0, len(lst), 2)]
14
-
15
-
16
- def serialize_docs(docs:list)->list:
17
- new_docs = []
18
- for doc in docs:
19
- new_doc = {}
20
- new_doc["page_content"] = doc.page_content
21
- new_doc["metadata"] = doc.metadata
22
- new_docs.append(new_doc)
23
- return new_docs
24
-
25
-
26
-
27
- def parse_output_llm_with_sources(output:str)->str:
28
- # Split the content into a list of text and "[Doc X]" references
29
- content_parts = re.split(r'\[(Doc\s?\d+(?:,\s?Doc\s?\d+)*)\]', output)
30
- parts = []
31
- for part in content_parts:
32
- if part.startswith("Doc"):
33
- subparts = part.split(",")
34
- subparts = [subpart.lower().replace("doc","").strip() for subpart in subparts]
35
- subparts = [f"""<a href="#doc{subpart}" class="a-doc-ref" target="_self"><span class='doc-ref'><sup>{subpart}</sup></span></a>""" for subpart in subparts]
36
- parts.append("".join(subparts))
37
- else:
38
- parts.append(part)
39
- content_parts = "".join(parts)
40
- return content_parts
41
-
42
- def process_figures(docs:list, new_figures:list)->tuple:
43
- docs = docs + new_figures
44
-
45
- figures = '<div class="figures-container"><p></p> </div>'
46
- gallery = []
47
- used_figures = []
48
-
49
- if docs == []:
50
- return docs, figures, gallery
51
-
52
-
53
- docs_figures = [d for d in docs if d.metadata["chunk_type"] == "image"]
54
- for i_doc, doc in enumerate(docs_figures):
55
- if doc.metadata["chunk_type"] == "image":
56
- path = doc.metadata["image_path"]
57
-
58
-
59
- if path not in used_figures:
60
- used_figures.append(path)
61
- figure_number = len(used_figures)
62
-
63
- try:
64
- key = f"Image {figure_number}"
65
-
66
- image_path = doc.metadata["image_path"].split("documents/")[1]
67
- img = get_image_from_azure_blob_storage(image_path)
68
-
69
- # Convert the image to a byte buffer
70
- buffered = BytesIO()
71
- max_image_length = 500
72
- img_resized = img.resize((max_image_length, int(max_image_length * img.size[1]/img.size[0])))
73
- img_resized.save(buffered, format="PNG")
74
-
75
- img_str = base64.b64encode(buffered.getvalue()).decode()
76
-
77
- figures = figures + make_html_figure_sources(doc, figure_number, img_str)
78
- gallery.append(img)
79
- except Exception as e:
80
- print(f"Skipped adding image {figure_number} because of {e}")
81
-
82
- return docs, figures, gallery
83
-
84
-
85
- def generate_html_graphs(graphs:list)->str:
86
- # Organize graphs by category
87
- categories = defaultdict(list)
88
- for graph in graphs:
89
- category = graph['metadata']['category']
90
- categories[category].append(graph['embedding'])
91
-
92
- # Begin constructing the HTML
93
- html_code = '''
94
- <!DOCTYPE html>
95
- <html lang="en">
96
- <head>
97
- <meta charset="UTF-8">
98
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
99
- <title>Graphs by Category</title>
100
- <style>
101
- .tab-content {
102
- display: none;
103
- }
104
- .tab-content.active {
105
- display: block;
106
- }
107
- .tabs {
108
- margin-bottom: 20px;
109
- }
110
- .tab-button {
111
- background-color: #ddd;
112
- border: none;
113
- padding: 10px 20px;
114
- cursor: pointer;
115
- margin-right: 5px;
116
- }
117
- .tab-button.active {
118
- background-color: #ccc;
119
- }
120
- </style>
121
- <script>
122
- function showTab(tabId) {
123
- var contents = document.getElementsByClassName('tab-content');
124
- var buttons = document.getElementsByClassName('tab-button');
125
- for (var i = 0; i < contents.length; i++) {
126
- contents[i].classList.remove('active');
127
- buttons[i].classList.remove('active');
128
- }
129
- document.getElementById(tabId).classList.add('active');
130
- document.querySelector('button[data-tab="'+tabId+'"]').classList.add('active');
131
- }
132
- </script>
133
- </head>
134
- <body>
135
- <div class="tabs">
136
- '''
137
-
138
- # Add buttons for each category
139
- for i, category in enumerate(categories.keys()):
140
- active_class = 'active' if i == 0 else ''
141
- html_code += f'<button class="tab-button {active_class}" onclick="showTab(\'tab-{i}\')" data-tab="tab-{i}">{category}</button>'
142
-
143
- html_code += '</div>'
144
-
145
- # Add content for each category
146
- for i, (category, embeds) in enumerate(categories.items()):
147
- active_class = 'active' if i == 0 else ''
148
- html_code += f'<div id="tab-{i}" class="tab-content {active_class}">'
149
- for embed in embeds:
150
- html_code += embed
151
- html_code += '</div>'
152
-
153
- html_code += '''
154
- </body>
155
- </html>
156
- '''
157
-
158
- return html_code
159
-
160
-
161
-
162
- def make_html_source(source,i):
163
- meta = source.metadata
164
- # content = source.page_content.split(":",1)[1].strip()
165
- content = source.page_content.strip()
166
-
167
- toc_levels = []
168
- for j in range(2):
169
- level = meta[f"toc_level{j}"]
170
- if level != "N/A":
171
- toc_levels.append(level)
172
- else:
173
- break
174
- toc_levels = " > ".join(toc_levels)
175
-
176
- if len(toc_levels) > 0:
177
- name = f"<b>{toc_levels}</b><br/>{meta['name']}"
178
- else:
179
- name = meta['name']
180
-
181
- score = meta['reranking_score']
182
- if score > 0.8:
183
- color = "score-green"
184
- elif score > 0.5:
185
- color = "score-orange"
186
- else:
187
- color = "score-red"
188
-
189
- relevancy_score = f"<p class=relevancy-score>Relevancy score: <span class='{color}'>{score:.1%}</span></p>"
190
-
191
- if meta["chunk_type"] == "text":
192
-
193
- card = f"""
194
- <div class="card" id="doc{i}">
195
- <div class="card-content">
196
- <h2>Doc {i} - {meta['short_name']} - Page {int(meta['page_number'])}</h2>
197
- <p>{content}</p>
198
- {relevancy_score}
199
- </div>
200
- <div class="card-footer">
201
- <span>{name}</span>
202
- <a href="{meta['url']}#page={int(meta['page_number'])}" target="_blank" class="pdf-link">
203
- <span role="img" aria-label="Open PDF">🔗</span>
204
- </a>
205
- </div>
206
- </div>
207
- """
208
-
209
- else:
210
-
211
- if meta["figure_code"] != "N/A":
212
- title = f"{meta['figure_code']} - {meta['short_name']}"
213
- else:
214
- title = f"{meta['short_name']}"
215
-
216
- card = f"""
217
- <div class="card card-image">
218
- <div class="card-content">
219
- <h2>Image {i} - {title} - Page {int(meta['page_number'])}</h2>
220
- <p class='ai-generated'>AI-generated description</p>
221
- <p>{content}</p>
222
-
223
- {relevancy_score}
224
- </div>
225
- <div class="card-footer">
226
- <span>{name}</span>
227
- <a href="{meta['url']}#page={int(meta['page_number'])}" target="_blank" class="pdf-link">
228
- <span role="img" aria-label="Open PDF">🔗</span>
229
- </a>
230
- </div>
231
- </div>
232
- """
233
-
234
- return card
235
-
236
-
237
- def make_html_papers(df,i):
238
- title = df['title'][i]
239
- content = df['abstract'][i]
240
- url = df['doi'][i]
241
- publication_date = df['publication_year'][i]
242
- subtitle = df['subtitle'][i]
243
-
244
- card = f"""
245
- <div class="card" id="doc{i}">
246
- <div class="card-content">
247
- <h2>Doc {i+1} - {title}</h2>
248
- <p>{content}</p>
249
- </div>
250
- <div class="card-footer">
251
- <span>{subtitle}</span>
252
- <a href="{url}" target="_blank" class="pdf-link">
253
- <span role="img" aria-label="Open paper">🔗</span>
254
- </a>
255
- </div>
256
- </div>
257
- """
258
-
259
- return card
260
-
261
-
262
- def make_html_figure_sources(source,i,img_str):
263
- meta = source.metadata
264
- content = source.page_content.strip()
265
-
266
- score = meta['reranking_score']
267
- if score > 0.8:
268
- color = "score-green"
269
- elif score > 0.5:
270
- color = "score-orange"
271
- else:
272
- color = "score-red"
273
-
274
- toc_levels = []
275
- if len(toc_levels) > 0:
276
- name = f"<b>{toc_levels}</b><br/>{meta['name']}"
277
- else:
278
- name = meta['name']
279
-
280
- relevancy_score = f"<p class=relevancy-score>Relevancy score: <span class='{color}'>{score:.1%}</span></p>"
281
-
282
- if meta["figure_code"] != "N/A":
283
- title = f"{meta['figure_code']} - {meta['short_name']}"
284
- else:
285
- title = f"{meta['short_name']}"
286
-
287
- card = f"""
288
- <div class="card card-image">
289
- <div class="card-content">
290
- <h2>Image {i} - {title} - Page {int(meta['page_number'])}</h2>
291
- <img src="data:image/png;base64, { img_str }" alt="Alt text" />
292
- <p class='ai-generated'>AI-generated description</p>
293
-
294
- <p>{content}</p>
295
-
296
- {relevancy_score}
297
- </div>
298
- <div class="card-footer">
299
- <span>{name}</span>
300
- <a href="{meta['url']}#page={int(meta['page_number'])}" target="_blank" class="pdf-link">
301
- <span role="img" aria-label="Open PDF">🔗</span>
302
- </a>
303
- </div>
304
- </div>
305
- """
306
- return card
307
-
308
-
309
-
310
- def make_toolbox(tool_name,description = "",checked = False,elem_id = "toggle"):
311
-
312
- if checked:
313
- span = "<span class='checkmark'>&#10003;</span>"
314
- else:
315
- span = "<span class='loader'></span>"
316
-
317
- # toolbox = f"""
318
- # <div class="dropdown">
319
- # <label for="{elem_id}" class="dropdown-toggle">
320
- # {span}
321
- # {tool_name}
322
- # <span class="caret"></span>
323
- # </label>
324
- # <input type="checkbox" id="{elem_id}" hidden/>
325
- # <div class="dropdown-content">
326
- # <p>{description}</p>
327
- # </div>
328
- # </div>
329
- # """
330
-
331
-
332
- toolbox = f"""
333
- <div class="dropdown">
334
- <label for="{elem_id}" class="dropdown-toggle">
335
- {span}
336
- {tool_name}
337
- </label>
338
- </div>
339
- """
340
-
341
- return toolbox
 
requirements.txt CHANGED
@@ -1,21 +1,13 @@
- gradio==5.0.2
+ gradio==4.19.1
  azure-storage-file-share==12.11.1
  azure-storage-blob
  python-dotenv==1.0.0
- langchain==0.2.1
- langchain_openai==0.1.7
- langgraph==0.0.55
- pinecone-client==4.1.0
+ langchain==0.1.4
+ langchain_openai==0.0.6
+ pinecone-client==3.0.2
  sentence-transformers==2.6.0
  huggingface-hub
+ msal
  pyalex==0.13
  networkx==3.2.1
- pyvis==0.3.2
- flashrank==0.2.5
- rerankers==0.3.0
- torch==2.3.0
- nvidia-cudnn-cu12==8.9.2.26
- langchain-community==0.2
- msal==1.31
- matplotlib==3.9.2
- gradio-modal==0.0.4
+ pyvis==0.3.2
sandbox/20240310 - CQA - Semantic Routing 1.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
sandbox/20240702 - CQA - Graph Functionality.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
sandbox/20241104 - CQA - StepByStep CQA.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
style.css CHANGED
@@ -3,79 +3,6 @@
  --user-image: url('https://ih1.redbubble.net/image.4776899543.6215/st,small,507x507-pad,600x600,f8f8f8.jpg');
  } */

- #tab-recommended_content{
- padding-top: 0px;
- padding-left : 0px;
- padding-right: 0px;
- }
- #group-subtabs {
- /* display: block; */
- position : sticky;
- }
-
-
- }
-
-
- #papers-summary-popup button span{
- /* make label of accordio in bold, center, and bigger */
- font-size: 16px;
- font-weight: bold;
- text-align: center;
-
- }
-
- #papers-relevant-popup span{
- /* make label of accordio in bold, center, and bigger */
- font-size: 16px;
- font-weight: bold;
- text-align: center;
- }
-
-
-
- #tab-citations .button{
- padding: 12px 16px;
- font-size: 16px;
- font-weight: bold;
- cursor: pointer;
- border: none;
- outline: none;
- text-align: left;
- transition: background-color 0.3s ease;
- }
-
-
- .gradio-container {
- width: 100%!important;
- max-width: 100% !important;
- }
-
- /* fix for huggingface infinite growth*/
- main.flex.flex-1.flex-col {
- max-height: 95vh !important;
- }
-
- button#show-figures{
- /* Base styles */
- background-color: #f5f5f5;
- border: 1px solid #e0e0e0;
- border-radius: 4px;
- color: #333333;
- cursor: pointer;
- width: 100%;
- text-align: center;
- }
-
- .avatar-container.svelte-1x5p6hu:not(.thumbnail-item) img {
- width: 100%;
- height: 100%;
- object-fit: cover;
- border-radius: 50%;
- padding: 0px;
- margin: 0px;
- }
-
  .warning-box {
  background-color: #fff3cd;
  border: 1px solid #ffeeba;
@@ -130,26 +57,14 @@ body.dark .tip-box * {

  .message{
  font-size:14px !important;
-
- }
- .card-content img {
- display: block;
- margin: auto;
- max-width: 100%; /* Ensures the image is responsive */
- height: auto;
  }

  a {
  text-decoration: none;
  color: inherit;
  }

- .doc-ref sup{
- color:#dc2626!important;
- /* margin-right:1px; */
- }
-
-
  .card {
  background-color: white;
  border-radius: 10px;
@@ -213,183 +128,94 @@ a {
  border:none;
  }

-
- label.selected{
- background: #93c5fd !important;
  }

- #submit-button{
- padding:0px !important;
  }

- #modal-config .block.modal-block.padded {
- padding-top: 25px;
- height: 100vh;
-
- }
- #modal-config .modal-container{
- margin: 0px;
- padding: 0px;
- }
- /* Modal styles */
- #modal-config {
- position: fixed;
- top: 0;
- left: 0;
- height: 100vh;
- width: 500px;
- background-color: white;
- box-shadow: 2px 0 10px rgba(0, 0, 0, 0.1);
- z-index: 1000;
- padding: 15px;
- transform: none;
- }
- #modal-config .close{
- display: none;
  }

- /* Push main content to the right when modal is open */
- /* .modal ~ * {
- margin-left: 300px;
- transition: margin-left 0.3s ease;
  } */

- #modal-config .modal .wrap ul{
- position:static;
- top: 100%;
- left: 0;
- /* min-height: 100px; */
- height: 100%;
- /* margin-top: 0; */
- z-index: 9999;
- pointer-events: auto;
- height: 200px;
- }
- #config-button{
- background: none;
- border: none;
- padding: 8px;
- cursor: pointer;
- width: 40px;
- height: 40px;
- display: flex;
- align-items: center;
- justify-content: center;
- border-radius: 50%;
- transition: background-color 0.2s;
- }

- #config-button::before {
- content: '⚙️';
- font-size: 20px;
- }

- #config-button:hover {
- background-color: rgba(0, 0, 0, 0.1);
- }

- #checkbox-config{
- display: block;
  position: absolute;
- background: none;
- border: none;
- padding: 8px;
- cursor: pointer;
- width: 40px;
- height: 40px;
- display: flex;
- align-items: center;
- justify-content: center;
  border-radius: 50%;
- transition: background-color 0.2s;
- font-size: 20px;
- text-align: center;
- }
- #checkbox-config:checked{
- display: block;
  }



  @media screen and (min-width: 1024px) {
- /* Additional style for scrollable tab content */
- /* div#tab-recommended_content {
- overflow-y: auto;
- max-height: 80vh;
- } */
-
- .gradio-container {
- max-height: calc(100vh - 190px) !important;
- overflow: hidden;
- }
- /* div#chatbot{
- height:calc(100vh - 170px) !important;
- max-height:calc(100vh - 170px) !important;
-
- } */
-
-
-
  div#tab-examples{
  height:calc(100vh - 190px) !important;
- overflow-y: scroll !important;
- /* overflow-y: auto; */
  }

  div#sources-textbox{
  height:calc(100vh - 190px) !important;
- overflow-y: scroll !important;
- /* overflow-y: auto !important; */
- }
- div#graphs-container{
- height:calc(100vh - 210px) !important;
- overflow-y: scroll !important;
- }
-
- div#sources-figures{
- height:calc(100vh - 300px) !important;
- max-height: 90vh !important;
- overflow-y: scroll !important;
- }
-
- div#graphs-container{
- height:calc(100vh - 300px) !important;
- max-height: 90vh !important;
- overflow-y: scroll !important;
- }
-
- div#tab-citations{
- height:calc(100vh - 300px) !important;
- max-height: 90vh !important;
- overflow-y: scroll !important;
  }

  div#tab-config{
  height:calc(100vh - 190px) !important;
- overflow-y: scroll !important;
- /* overflow-y: auto !important; */
  }

- /* Force container to respect height limits */
- .main-component{
- contain: size layout;
- overflow: hidden;
- }
-
-
  div#chatbot-row{
- max-height:calc(100vh - 90px) !important;
  }
- /*


  .max-height{
  height:calc(100vh - 90px) !important;
- max-height:calc(100vh - 90px) !important;
  overflow-y: auto;
  }
- */

  }

  footer {
@@ -432,33 +258,21 @@
  /* ... add other mobile-specific styles ... */
  }

- @media (prefers-color-scheme: dark) {
- .card{
- background-color: #374151;
- }
- .card-image > .card-content{
- background-color: rgb(55, 65, 81) !important;
- }

- .card-footer {
- background-color: #404652;
- }

- .container > .wrap{
- background-color: #374151 !important;
- color:white !important;
- }
- .card-content h2{
- color:#e7754f !important;
- }
- .doc-ref sup{
- color:rgb(235 109 35)!important;
- /* margin-right:1px; */
- }
- .card-footer span {
- color:white !important;
- }
-
  }


@@ -504,7 +318,7 @@ span.chatbot > p > img{
  }

  .card-image > .card-content{
- background-color:#f1f7fa;
  }


@@ -530,7 +344,8 @@ span.chatbot > p > img{
  }

  #dropdown-samples{
-
  background:none !important;

  }
@@ -548,190 +363,3 @@ span.chatbot > p > img{
  .a-doc-ref{
  text-decoration: none !important;
  }
-
-
- .dropdown {
- position: relative;
- display:inline-block;
- margin-bottom: 10px;
- }
-
- .dropdown-toggle {
- background-color: #f2f2f2;
- color: black;
- padding: 10px;
- font-size: 16px;
- cursor: pointer;
- display: block;
- width: 400px; /* Adjust width as needed */
- position: relative;
- display: flex;
- align-items: center; /* Vertically center the contents */
- justify-content: left;
- }
-
- .dropdown-toggle .caret {
- content: "";
- position: absolute;
- right: 10px;
- top: 50%;
- border-left: 5px solid transparent;
- border-right: 5px solid transparent;
- border-top: 5px solid black;
- transform: translateY(-50%);
- }
-
- input[type="checkbox"] {
- display: none !important;
- }
-
- input[type="checkbox"]:checked + .dropdown-content {
- display: block;
- }
-
- #checkbox-chat input[type="checkbox"] {
- display: flex !important;
- }
-
- .dropdown-content {
- display: none;
- position: absolute;
- background-color: #f9f9f9;
- min-width: 300px;
- box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2);
- z-index: 1;
- padding: 12px;
- border: 1px solid #ccc;
- }
-
- input[type="checkbox"]:checked + .dropdown-toggle + .dropdown-content {
- display: block;
- }
-
- input[type="checkbox"]:checked + .dropdown-toggle .caret {
- border-top: 0;
- border-bottom: 5px solid black;
- }
-
- .loader {
- border: 1px solid #d0d0d0 !important; /* Light grey background */
- border-top: 1px solid #db3434 !important; /* Blue color */
- border-right: 1px solid #3498db !important; /* Blue color */
- border-radius: 50%;
- width: 20px;
- height: 20px;
- animation: spin 2s linear infinite;
- display:inline-block;
- margin-right:10px !important;
- }
-
- .checkmark{
- color:green !important;
- font-size:18px;
- margin-right:10px !important;
- }
-
- @keyframes spin {
- 0% { transform: rotate(0deg); }
- 100% { transform: rotate(360deg); }
- }
-
-
- .relevancy-score{
- margin-top:10px !important;
- font-size:10px !important;
- font-style:italic;
- }
-
- .score-green{
- color:green !important;
- }
-
- .score-orange{
- color:orange !important;
- }
-
- .score-red{
- color:red !important;
- }
-
- /* Mobile specific adjustments */
- @media screen and (max-width: 767px) {
- div#tab-recommended_content {
- max-height: 50vh; /* Reduce height for smaller screens */
- overflow-y: auto;
- }
- }
-
- /* Additional style for scrollable tab content */
- div#tab-saved-graphs {
- overflow-y: auto; /* Enable vertical scrolling */
- max-height: 80vh; /* Adjust height as needed */
- }
-
- /* Mobile specific adjustments */
- @media screen and (max-width: 767px) {
- div#tab-saved-graphs {
- max-height: 50vh; /* Reduce height for smaller screens */
- overflow-y: auto;
- }
- }
- .message-buttons-left.panel.message-buttons.with-avatar {
- display: none;
- }
-
-
- /* Specific fixes for Hugging Face Space iframe */
- .h-full {
- height: auto !important;
- min-height: 0 !important;
- }
-
- .space-content {
- height: auto !important;
- max-height: 100vh !important;
- overflow: hidden;
- }
-
-
- /* Mobile specific modal configuration */
- @media screen and (max-width: 767px) {
- #modal-config {
- width: 100%; /* Full width on mobile */
- height: 100vh;
- left: 0;
- top: 0;
- padding: 10px; /* Reduced padding for mobile */
- }
-
- #modal-config .block.modal-block.padded {
- padding-top: 15px; /* Reduced top padding */
- height: 100vh;
- overflow-y: auto; /* Enable scrolling */
- }
-
- #modal-config .modal-container {
- width: 100%;
- height: 100%;
- }
-
- /* Show close button on mobile */
- #modal-config .close {
- display: block;
- position: absolute;
- top: 10px;
- right: 10px;
- z-index: 1001;
- padding: 8px;
- font-size: 24px;
- background: none;
- border: none;
- cursor: pointer;
- }
-
- /* Ensure modal content is scrollable on mobile */
- #modal-config .modal .wrap ul {
- max-height: calc(100vh - 60px); /* Account for header space */
- overflow-y: auto;
- }
- }

  --user-image: url('https://ih1.redbubble.net/image.4776899543.6215/st,small,507x507-pad,600x600,f8f8f8.jpg');
  } */

  .warning-box {
  background-color: #fff3cd;
  border: 1px solid #ffeeba;


  .message{
  font-size:14px !important;
  }

+
  a {
  text-decoration: none;
  color: inherit;
  }

  .card {
  background-color: white;
  border-radius: 10px;

  border:none;
  }

+ /* .gallery-item > div:hover{
+ background-color:#7494b0 !important;
+ color:white!important;
  }

+ .gallery-item:hover{
+ border:#7494b0 !important;
  }

+ .gallery-item > div{
+ background-color:white !important;
+ color:#577b9b!important;
  }

+ .label{
+ color:#577b9b!important;
  } */

+ /* .paginate{
+ color:#577b9b!important;
+ } */



+ /* span[data-testid="block-info"]{
+ background:none !important;
+ color:#577b9b;
+ } */
+
+ /* Pseudo-element for the circularly cropped picture */
+ /* .message.bot::before {
+ content: '';
  position: absolute;
+ top: -10px;
+ left: -10px;
+ width: 30px;
+ height: 30px;
+ background-image: var(--user-image);
+ background-size: cover;
+ background-position: center;
  border-radius: 50%;
+ z-index: 10;
+ }
+ */
+
+ label.selected{
+ background:none !important;
  }

+ #submit-button{
+ padding:0px !important;
+ }


  @media screen and (min-width: 1024px) {
  div#tab-examples{
  height:calc(100vh - 190px) !important;
+ overflow-y: auto;
  }

  div#sources-textbox{
  height:calc(100vh - 190px) !important;
+ overflow-y: auto !important;
  }

  div#tab-config{
  height:calc(100vh - 190px) !important;
+ overflow-y: auto !important;
  }

  div#chatbot-row{
+ height:calc(100vh - 90px) !important;
  }

+ div#chatbot{
+ height:calc(100vh - 170px) !important;
+ }

  .max-height{
  height:calc(100vh - 90px) !important;
  overflow-y: auto;
  }

+ /* .tabitem:nth-child(n+3) {
+ padding-top:30px;
+ padding-left:40px;
+ padding-right:40px;
+ } */
  }

  footer {

  /* ... add other mobile-specific styles ... */
  }


+ body.dark .card{
+ background-color: #374151;
+ }

+ body.dark .card-content h2{
+ color:#f4dbd3 !important;
+ }
+
+ body.dark .card-footer {
+ background-color: #404652;
+ }
+
+ body.dark .card-footer span {
+ color:white !important;
  }



  }

  .card-image > .card-content{
+ background-color:#f1f7fa !important;
  }



  }

  #dropdown-samples{
+ /*! border:none !important; */
+ /*! border-width:0px !important; */
  background:none !important;

  }

  .a-doc-ref{
  text-decoration: none !important;
  }
 
test.json DELETED
File without changes