|
|
|
|
|
import streamlit as st |
|
import os |
|
import json |
|
from dotenv import load_dotenv |
|
from haystack.nodes.prompt import PromptNode, PromptTemplate |
|
from haystack.nodes import EmbeddingRetriever |
|
from haystack import Pipeline |
|
import numpy as np |
|
import pandas as pd |
|
from haystack.document_stores import FAISSDocumentStore |
|
from haystack.nodes import EmbeddingRetriever |
|
from haystack.schema import Document |
|
|
|
|
|
|
|
# Load variables from a local .env file (if present) BEFORE reading the key.
# `load_dotenv` was imported at the top of the file but never invoked, so the
# app crashed with KeyError unless OPENAI_API_KEY was already exported.
load_dotenv()

# Fail fast if the key is still missing — the PromptNode below cannot run
# without it.
openai_key = os.environ["OPENAI_API_KEY"]

# Chat model used by the PromptNode for answer generation.
model_name = "gpt-3.5-turbo"
|
|
|
|
|
# Prompt used for every query. The {' - '.join(...)} and {query} placeholders
# are Haystack PromptTemplate expressions evaluated at run time against the
# pipeline's `documents` and `query` inputs — this is NOT a Python f-string.
# Each document is serialized as "&&& <document_name> ref. <ref_id> &&&: <content>"
# so the model can cite sources and get_refs() can parse "ref. N" markers back
# out of the generated answer.
template = PromptTemplate(

prompt="""

Answer the given question using the following documents \

Formulate your answer in the style of an academic report \

Provide example quotes and citations using extracted text from the documents. \

Use facts and numbers from the documents in your answer. \

Reference information used from documents at the end of each applicable sentence (ex: [source: document_name]), where 'document_name' is the text provided at the start of each document (demarcated by '- &&&' and '&&&:')'. \

If no relevant information to answer the question is present in the documents, just say you don't have enough information to answer. \

Format your response as a JSON object with "answer" and "sources" as the keys. \

The "answer" key is the response to the query and "sources" key is the reference information used from the documents. \



Context: {' - '.join(['&&& '+d.meta['document_name']+' ref. '+str(d.meta['ref_id'])+' &&&: '+d.content for d in documents])}; Question: {query}; Answer:""",

)
|
|
|
|
|
# Countries covered by the underlying FAISS index; shown in the selectbox.
country_options = [
    'Angola',
    'Botswana',
    'Lesotho',
    'Kenya',
    'Malawi',
    'Mozambique',
    'Namibia',
    'South Africa',
    'Zambia',
    'Zimbabwe',
]
|
|
|
|
|
# Canned example questions for the st.radio widget; "-" is the sentinel
# meaning "no example selected" (user types a free-form question instead).
examples = [
    "-",
    "What specific initiatives are presented in the context to address the needs of groups such as women and children to the effects of climate change?",
    "In addition to gender, children, and youth, is there any mention of other groups facing disproportional impacts from climate change due to their geographic location, socio-economic status, age, gender, health, and occupation?"
]
|
|
|
def get_docs(input_query, country=None):
    """Retrieve the top documents for a query, optionally scoped to a country.

    Args:
        input_query: Natural-language question from the user.
        country: Optional country name. When given, it is prepended to the
            retrieval query and used to filter the retrieved passages by
            their ``country`` metadata.

    Returns:
        list[Document]: Up to 10 Haystack Documents, each with ``country``,
        ``document_name``, ``ref_id`` (1-based rank) and ``score`` metadata.
        ``ref_id`` is the citation marker the prompt template and get_refs()
        use to cross-reference sources.
    """
    # Steer the embedding retriever toward the selected country.
    if country:
        query = "For the country of " + country + ", " + input_query
    else:
        query = input_query

    # Over-retrieve so the country filter below still leaves enough hits.
    hits = retriever.retrieve(query=query, top_k=150)

    df_docs = pd.DataFrame(
        [{**h.meta, "score": h.score, "content": h.content} for h in hits]
    )

    if country:
        # Exact-match on the country metadata. The previous
        # `df.query('country in @country')` applied `in` against a plain
        # string rather than a list and did not express an equality test.
        df_docs = df_docs[df_docs["country"] == country]

    # Keep the 10 best hits and assign 1-based reference ids for citations.
    # (drop=True: the pre-filter index is not needed downstream.)
    df_docs = df_docs.head(10).reset_index(drop=True)
    df_docs["ref_id"] = df_docs.index + 1

    return [
        Document(
            row["content"],
            meta={
                "country": row["country"],
                "document_name": row["document"],
                "ref_id": row["ref_id"],
                "score": row["score"],
            },
        )
        for _, row in df_docs.iterrows()
    ]
|
|
|
def get_refs(res):
    """Extract and format the documents the generated answer actually cited.

    The prompt template labels each document with "ref. <id>" and asks the
    model to repeat those markers in its answer; this parses the markers out
    of the generated text and formats the matching source documents.

    Args:
        res: Pipeline output dict with "results" (list of generated texts)
            and "documents" (the retrieved Haystack Documents).

    Returns:
        str: One formatted reference per cited document, separated by blank
        lines; empty string when the answer cites nothing. (Previously this
        only printed to stdout and returned None, so the Streamlit caller
        displayed nothing useful.)
    """
    import re

    text = res["results"][0]

    # Citation markers look like "ref. 3" (see the prompt template).
    pattern = r'ref\. (\d+)'
    cited_ids = {int(match) for match in re.findall(pattern, text)}

    refs = []
    for document in res['documents']:
        doc = document.to_dict()
        meta = doc['meta']
        if meta['ref_id'] in cited_ids:
            refs.append(
                "Ref. " + str(meta['ref_id']) + " [" + meta['country'] + " "
                + meta['document_name'] + "]: " + doc['content']
            )
    return "\n\n".join(refs)
|
|
|
def run_query(input_text):
    """Run retrieve-then-generate for a question and render the result.

    Reads the module-level `country` chosen in the selectbox to scope
    retrieval (previously the user's selection was silently ignored), then
    displays the generated answer and its cited references.

    Args:
        input_text: The user's question.
    """
    # Scope retrieval to the UI-selected country.
    docs = get_docs(input_text, country=country)
    res = pipe.run(query=input_text, documents=docs)

    output = res["results"][0]
    st.write('Response')
    st.success(output)

    st.write('References')
    # NOTE(review): displays get_refs' return value — verify get_refs returns
    # the formatted references rather than printing them.
    st.success(get_refs(res))
|
|
|
|
|
|
|
# Load the pre-built FAISS index (and its JSON config) from disk; this store
# holds the embedded climate-policy document passages.
document_store = FAISSDocumentStore.load(
    index_path="./cpv_southern_africa_kenya.faiss",
    config_path="./cpv_southern_africa_kenya.json",
)

# Dense retriever over the store — presumably the same sentence-transformers
# model the index was built with (mismatch would degrade retrieval).
retriever = EmbeddingRetriever(
    document_store=document_store,
    embedding_model="sentence-transformers/multi-qa-mpnet-base-dot-v1",
    model_format="sentence_transformers",
    progress_bar=False,
)
|
|
|
|
|
# Deterministic generation (sampling off, temperature 0) so repeated queries
# over the same retrieved context give reproducible answers.
generation_settings = {"generation_kwargs": {"do_sample": False, "temperature": 0}}

# LLM wrapper: the chat model configured above, driven by the citation-aware
# prompt template.
pn = PromptNode(
    model_name_or_path=model_name,
    default_prompt_template=template,
    api_key=openai_key,
    max_length=700,
    model_kwargs=generation_settings,
)
|
|
|
|
|
# Minimal Haystack pipeline: a single PromptNode fed from the Query input.
# Documents are supplied explicitly via pipe.run(..., documents=...) rather
# than by a retriever node inside the pipeline.
pipe = Pipeline()

pipe.add_node(component=pn, name="prompt_node", inputs=["Query"])
|
|
|
|
|
|
|
st.title('Climate Policy Documents: Vulnerabilities Analysis Q&A (test)')

# NOTE: the country list in this description is kept in sync with
# `country_options` above (10 countries: 9 Southern African plus Kenya).
# The previous text said "9" while listing 10 names, included Eswatini
# (which is not selectable) and omitted Kenya (which is).
st.markdown('This tool seeks to provide an interface for querying national climate policy documents (NDCs, LTS etc.). The current version is powered by chatGPT (3.5) and limited to 10 countries (Angola, Botswana, Kenya, Lesotho, Malawi, Mozambique, Namibia, South Africa, Zambia, Zimbabwe). The intended use case is to allow users to interact with the documents and obtain valuable insights on various vulnerable groups affected by climate change.')

st.markdown('**DISCLAIMER:** This prototype tool based on LLMs (Language Models) is provided "as is" for experimental and exploratory purposes only, and should not be used for critical or production applications. Users are advised that the tool may contain errors, bugs, or limitations and should be used with caution and awareness of potential risks, and the developers make no warranties or guarantees regarding its performance, reliability, or suitability for any specific purpose.')
|
|
|
|
|
|
|
# --- User input widgets ---------------------------------------------------

# Country scope for the question.
country = st.selectbox('Select a country:', country_options)

# Optional canned question; "-" means free-form entry.
selected_example = st.radio("Example questions", examples)

# Pre-fill the text area with the chosen example, or leave it empty for
# free-form input (a single call replaces the duplicated branches).
question_label = 'Enter your question in the text box below using natural language or select an example from above:'
prefill = "" if selected_example == "-" else selected_example
text = st.text_area(question_label, value=prefill)

if st.button('Submit'):
    run_query(text)