File size: 7,528 Bytes
ca49a1b
 
 
 
674face
ca49a1b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfa3ec5
 
1e6395c
 
 
 
ca49a1b
1e6395c
ca49a1b
 
 
f95af38
ca49a1b
 
 
 
 
 
 
 
 
 
76c2d0a
 
 
 
0881dc7
1e6395c
ca49a1b
 
1e6395c
ca49a1b
 
 
 
 
 
1e6395c
 
ca49a1b
 
1e6395c
 
 
 
 
 
 
 
 
 
ca49a1b
 
6193768
 
 
 
 
a7844e4
6193768
 
 
 
 
 
a7844e4
6193768
a7844e4
 
 
39fdf31
a7844e4
9f8d49e
76c2d0a
184544f
78d3b40
ca49a1b
 
 
a7844e4
8d37ffc
6193768
8711c74
 
a7844e4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9f8d49e
ca49a1b
 
 
 
 
f95af38
 
ca49a1b
 
 
 
 
 
 
bfa3ec5
ca49a1b
 
 
 
 
 
 
 
 
f95af38
ca49a1b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8edade7
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
# inspiration from Ekimetrics climate qa

import streamlit as st
import os
import re
import json
from dotenv import load_dotenv
from haystack.nodes.prompt import PromptNode, PromptTemplate
from haystack.nodes import EmbeddingRetriever
from haystack import Pipeline
import numpy as np
import pandas as pd
from haystack.document_stores import FAISSDocumentStore
from haystack.nodes import EmbeddingRetriever
from haystack.schema import Document


# Enter openai API key
openai_key = os.environ["OPENAI_API_KEY"]

# Select model
model_name = "gpt-3.5-turbo"

# Prompt template for the QA step.  It instructs the model to answer in an
# academic-report style and to cite sources via the engineered
# "&&& <document_name> ref. <id> &&&" markers wrapped around each document.
# NOTE: the `{' - '.join(...)}` and `{query}` placeholders are Haystack
# template expressions evaluated by PromptNode at run time — this is NOT a
# Python f-string, so the braces must stay exactly as written.
# get_refs() below depends on the "ref. <id>" citation format this induces.
template = PromptTemplate(
    prompt="""
Answer the given question using the following documents. \
Formulate your answer in the style of an academic report. \
Provide example quotes and citations using extracted text from the documents. \
Use facts and numbers from the documents in your answer. \
Reference information used from documents at the end of each applicable sentence (ex: [source: document_name]), where 'document_name' is the text provided at the start of each document (demarcated by '- &&&' and '&&&:')'. \
If no relevant information to answer the question is present in the documents, just say you don't have enough information to answer. \

Context: {' - '.join(['&&& '+d.meta['document_name']+' ref. '+str(d.meta['ref_id'])+' &&&: '+d.content for d in documents])}; Question: {query}; Answer:""",
)

# Countries offered in the UI dropdown; values must match the 'country'
# metadata field stored in the FAISS document store (see get_docs filtering).
# NOTE(review): the disclaimer text below lists Eswatini but not Kenya, while
# this list contains Kenya but not Eswatini — confirm which country set the
# index actually covers.
country_options = ['Angola','Botswana','Lesotho','Kenya','Malawi','Mozambique','Namibia','South Africa','Zambia','Zimbabwe']

# Canned example questions rendered as radio buttons; the "-" sentinel means
# "no example selected" and leaves the free-text box empty.
examples = [
    "-",
    "What specific initiatives are presented in the context to address the needs of groups such women and children to the effects climate change?",
    "In addition to gender, children, and youth, is there any mention of other groups facing disproportional impacts from climate change due to their geographic location, socio-economic status, age, gender, health, and occupation?"
]

def get_docs(input_query, country=None, top_k_retrieve=150, top_k_return=10):
    """Retrieve the most relevant documents for a query, optionally per country.

    Args:
        input_query: The user's natural-language question.
        country: Optional country name.  When given, the query is prefixed with
            the country (to bias the dense retriever) and the candidates are
            filtered to exact matches on their 'country' metadata.
        top_k_retrieve: Candidates to over-fetch from the retriever before
            filtering (FAISS document stores cannot filter on metadata).
        top_k_return: Maximum documents returned after filtering.

    Returns:
        List of haystack Documents carrying country / document_name / ref_id /
        score metadata.  ref_id is 1-based and is the id the prompt template
        embeds and get_refs() later parses; empty list when nothing matches.
    """
    # Hacky query rewrite to focus the retriever on the target country — a
    # stopgap until the document store supports metadata filtering.
    if country:
        query = f"For the country of {country}, {input_query}"
    else:
        query = input_query

    candidates = retriever.retrieve(query=query, top_k=top_k_retrieve)

    # Flatten metadata + score + content into rows for pandas filtering.
    rows = [{**d.meta, "score": d.score, "content": d.content} for d in candidates]
    df_docs = pd.DataFrame(rows)

    # Guard: an empty retrieval result produces a column-less DataFrame that
    # would raise on the 'country' lookup below.
    if df_docs.empty:
        return []

    if country:
        # Exact match on the metadata field.  The previous
        # .query('country in @country') relied on substring containment of the
        # column value inside the country string, which only worked by accident.
        df_docs = df_docs[df_docs["country"] == country]

    # Keep the best few and assign 1-based reference ids for citation.
    df_docs = df_docs.head(top_k_return).reset_index(drop=True)
    df_docs["ref_id"] = df_docs.index + 1

    # Convert back to haystack Documents with the fields the prompt template
    # and get_refs() depend on.
    return [
        Document(
            row["content"],
            meta={
                "country": row["country"],
                "document_name": row["document"],
                "ref_id": row["ref_id"],
                "score": row["score"],
            },
        )
        for _, row in df_docs.iterrows()
    ]

def get_refs(res):
    """Extract the source passages actually cited in a model response.

    Parses the generated answer for the engineered "ref. <n>" markers (see the
    prompt template) and returns a markdown string listing each cited document.

    Args:
        res: Pipeline result dict with "results" (list of generated answers,
            first one is used) and "documents" (the Documents that were passed
            to the prompt, each exposing to_dict()).

    Returns:
        Markdown string of cited references; "" when nothing was cited.
    """
    text = res["results"][0]
    # gpt-3.5 emits citations like "[source: Name ref. 3]"; a set gives O(1)
    # membership tests below.  (The redundant function-local `import re` was
    # removed — `re` is already imported at module level.)
    cited_ids = {int(match) for match in re.findall(r'ref\. (\d+)', text)}

    parts = []
    for document in res['documents']:
        doc = document.to_dict()
        ref_id = doc['meta']['ref_id']
        if ref_id in cited_ids:
            # <br> tags render as line breaks via st.markdown(unsafe_allow_html=True).
            parts.append(
                "**Ref. " + str(ref_id) + " [" + doc['meta']['country'] + " "
                + doc['meta']['document_name'] + "]:** " + "*'"
                + doc['content'] + "'*<br> <br>"
            )
    # join avoids the quadratic repeated-concatenation of the original.
    return "".join(parts)

def run_query(input_text, country):
    """Run retrieval + generation for a question and render the results.

    Retrieves country-filtered documents, feeds them through the prompt
    pipeline, and writes the answer and its cited references to the page.
    """
    st.write('Selected country: ', country) # Debugging country
    retrieved = get_docs(input_text, country)
    result = pipe.run(query=input_text, documents=retrieved)

    answer = result["results"][0]
    st.write('Response')
    st.success(answer)

    st.write('References')
    st.markdown(get_refs(result), unsafe_allow_html=True)


# def run_query(input_text):
#     docs = get_docs(input_text)
#     st.write('Debug: Documents', docs) # Debugging output

#     res = pipe.run(query=input_text, documents=docs)
#     st.write('Debug: Results', res) # Debugging output

#     output = res["results"][0]
#     st.write('Response')
#     st.success(str(output))

#     references = get_refs(res)
#     st.write('Debug: References', references) # Debugging output

#     st.write('References')
#     if references:
#         st.success(str(references))
#     else:
#         st.warning('No references found.')



# Setup retriever, pulling from local faiss datastore.
# Module-level side effect: loads the FAISS index + config from the working
# directory, so both files must sit next to this script.
retriever = EmbeddingRetriever(
    document_store=FAISSDocumentStore.load(
        index_path="./cpv_southern_africa_kenya.faiss",
        config_path="./cpv_southern_africa_kenya.json",
    ),
    # Must be the same embedding model the index was built with, otherwise
    # query and document vectors live in different spaces.
    embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
    model_format="sentence_transformers",
    progress_bar=False,
)

# Initialize the PromptNode.
# do_sample=False / temperature=0 make the generation deterministic.
pn = PromptNode(model_name_or_path=model_name, default_prompt_template=template, api_key=openai_key, max_length=2000, model_kwargs={"generation_kwargs": {"do_sample": False, "temperature": 0}})

# Initialize the pipeline: a single prompt node — retrieval is done
# separately in get_docs() so results can be country-filtered first.
pipe = Pipeline()
pipe.add_node(component=pn, name="prompt_node", inputs=["Query"])

# ---- Streamlit page layout ----

# Title and guiding text.
st.title('Climate Policy Documents: Vulnerabilities Analysis Q&A (test)')
st.markdown('This tool seeks to provide an interface for quering national climate policy documents (NDCs, LTS etc.). The current version is powered by chatGPT (3.5) and limited to 9 Southern African countries (Angola, Botswana, Eswatini, Lesotho, Malawi, Mozambique, Namibia, South Africa, Zambia, Zimbabwe). The intended use case is to allow users to interact with the documents and obtain valuable insights on various vulnerable groups affected by climate change.')
st.markdown('**DISCLAIMER:** This prototype tool based on LLMs (Language Models) is provided "as is" for experimental and exploratory purposes only, and should not be used for critical or production applications. Users are advised that the tool may contain errors, bugs, or limitations and should be used with caution and awareness of potential risks, and the developers make no warranties or guarantees regarding its performance, reliability, or suitability for any specific purpose.')

# Country dropdown.
country = st.selectbox('Select a country:', country_options)

# Example questions shown as radio buttons ("-" = free-text entry).
selected_example = st.radio("Example questions", examples)

# Pre-fill the question box with the chosen example; empty for the "-"
# sentinel (st.text_area's default value is "" either way).
prefill = "" if selected_example == "-" else selected_example
text = st.text_area('Enter your question in the text box below using natural language or select an example from above:', value=prefill)

if st.button('Submit'):
    run_query(text, country=country)