import streamlit as st
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain_community.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
import google.generativeai as genai
from dotenv import load_dotenv
import os
from ai71 import AI71
import re

# Load environment variables
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
AI71_API_KEY = os.getenv("AI71_API_KEY")  # keep API keys in .env, never hard-coded in source

genai.configure(api_key=GOOGLE_API_KEY)

# Define the function to load the vector store
def load_vector_store():
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
    return vector_store
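
# load_vector_store() assumes a "faiss_index" directory already exists on disk.
# A minimal sketch of how such an index could be built, assuming the source
# documents are local PDFs read with pypdf and chunked with LangChain's
# RecursiveCharacterTextSplitter (the helper name, chunk sizes, and pypdf
# dependency are assumptions, not part of this app's runtime path):
def build_vector_store(pdf_paths, index_dir="faiss_index"):
    from pypdf import PdfReader
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    # Extract raw text from every page of every PDF.
    text = ""
    for path in pdf_paths:
        for page in PdfReader(path).pages:
            text += page.extract_text() or ""

    # Split into overlapping chunks so each fits the embedding context window.
    splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
    chunks = splitter.split_text(text)

    # Embed the chunks and persist the index that load_vector_store() reads.
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vector_store = FAISS.from_texts(chunks, embedding=embeddings)
    vector_store.save_local(index_dir)
    return vector_store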

# Define the function to get the conversational chain
def get_conversational_chain():
    prompt_template = """
    Answer the question as detailed as possible from the provided context, and
    make sure to include all relevant details. If the answer is not in the
    provided context, just say "answer is not available in the context";
    do not provide a wrong answer.

    Context:
    {context}

    Question:
    {question}

    Answer:
    """

    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)

    return chain

# Define the function to handle user input
def handle_user_query(user_question, vector_store):
    docs = vector_store.similarity_search(user_question)

    chain = get_conversational_chain()

    response = chain(
        {"input_documents": docs, "question": user_question},
        return_only_outputs=True
    )

    # initial response generator agent
    initial_response = response.get("output_text", "No response generated.")
    # detailed response generator agent
    detailed_response = generate_detailed_response(initial_response, user_question)
    # translator agent
    urdu_response = generate_urdu_response(detailed_response)
    
    return detailed_response, urdu_response

def clean_detailed_answer(response_text):
    # Remove the "Reply:" prefix at the start
    response_text = re.sub(r'^Reply:\s*', '', response_text, flags=re.IGNORECASE)
    
    # Remove the "User:" suffix at the end (if applicable)
    response_text = re.sub(r'\s*User:\s*$', '', response_text, flags=re.IGNORECASE)
    
    return response_text
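
# A quick usage example of the cleanup above (hypothetical strings):
# clean_detailed_answer("Reply: GDP measures total output. User:")
# returns "GDP measures total output."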

# Define the function to generate a detailed response using Falcon LLM with streaming
def generate_detailed_response(initial_response, question):
    prompt = f"""

    Provide a detailed and relevant explanation based on the initial response. Avoid any apologies or unnecessary prefaces.



    Initial Response:

    {initial_response}



    Question:

    {question}



    Detailed Answer:

    """
    
    detailed_answer = ""
    for chunk in AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        stream=True
    ):
        if chunk.choices[0].delta.content:
            detailed_answer += chunk.choices[0].delta.content
            # Optionally, print each chunk for debugging
            print(chunk.choices[0].delta.content, end="", flush=True)
    cleaned_answer = clean_detailed_answer(detailed_answer)
    return cleaned_answer

# Define the function to generate a response in Urdu using Falcon LLM
def generate_urdu_response(english_text):
    prompt = f"""

    Translate the following text into Urdu while preserving the meaning and details.



    English Text:

    {english_text}



    Urdu Translation:

    """
    
    urdu_response = ""
    for chunk in AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": "You are a translation assistant."},
            {"role": "user", "content": prompt}
        ],
        stream=True
    ):
        if chunk.choices[0].delta.content:
            urdu_response += chunk.choices[0].delta.content
            # Optionally, print each chunk for debugging
            print(chunk.choices[0].delta.content, end="", flush=True)
    
    return urdu_response
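
# The two generator functions above share the same streaming loop. A possible
# consolidation they could delegate to (a sketch under the same AI71 client
# assumptions as above; not wired into the functions, which are kept as-is):
def stream_falcon_completion(system_message, prompt):
    """Stream a Falcon chat completion via AI71 and return the accumulated text."""
    answer = ""
    for chunk in AI71(AI71_API_KEY).chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        stream=True,
    ):
        # Some stream chunks carry no content (e.g., role-only deltas); skip them.
        if chunk.choices[0].delta.content:
            answer += chunk.choices[0].delta.content
    return answer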

# Define the main function for Streamlit app
def main():
    st.set_page_config(page_title="Chat with PDF")
    st.header("Ask about economic studies")

    # Initialize session state if it doesn't exist
    if 'history' not in st.session_state:
        st.session_state.history = []

    # Load the vector store initially
    if 'vector_store' not in st.session_state:
        st.session_state.vector_store = load_vector_store()
    
    # Text input for user query
    user_question = st.text_input("Ask a Question")

    if st.button("Generate Response"):
        if user_question:
            with st.spinner('Generating response, please wait...'):
                english_response, urdu_response = handle_user_query(user_question, st.session_state.vector_store)
                st.markdown("**English Response:**")
                st.write(english_response)
                st.markdown("**Urdu Translation:**")
                st.write(urdu_response)
                # Add new query and response at the beginning of the history
                st.session_state.history.insert(0, {
                    'user_question': user_question,
                    'english_response': english_response,
                    'urdu_response': urdu_response
                })

    # Display the history
    if st.session_state.history:
        st.subheader("Response History")
        for entry in st.session_state.history:
            st.markdown("**User's Question:**")
            st.write(entry['user_question'])
            st.markdown("**English Response:**")
            st.write(entry['english_response'])
            st.markdown("**Urdu Translation:**")
            st.write(entry['urdu_response'])

if __name__ == "__main__":
    main()
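
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py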