rayyanphysicist committed on
Commit
45d4bbf
·
verified ·
1 Parent(s): 3373c26

Upload 5 files

Browse files
Files changed (5) hide show
  1. .env +2 -0
  2. app.py +172 -0
  3. faiss_index/index.faiss +0 -0
  4. faiss_index/index.pkl +3 -0
  5. requirements.txt +10 -0
.env ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ GOOGLE_API_KEY="<REDACTED-ROTATE-THIS-KEY>"  # SECURITY: a real Google API key was committed here — revoke/rotate it and never commit .env files
2
+
app.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
import google.generativeai as genai
from dotenv import load_dotenv
import os
from ai71 import AI71
import re

# Load configuration from a local .env file (which must NOT be committed to VCS).
load_dotenv()
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
# SECURITY: the AI71 key was previously hard-coded in this file (and therefore
# leaked in version control). Read it from the environment instead; any key
# that was ever committed must be rotated.
AI71_API_KEY = os.getenv("AI71_API_KEY")

genai.configure(api_key=GOOGLE_API_KEY)
18
+
19
def load_vector_store():
    """Load the persisted FAISS index from ./faiss_index using Gemini embeddings."""
    # allow_dangerous_deserialization is required because the index metadata is a
    # pickle; it is acceptable here only because the file ships with the app.
    return FAISS.load_local(
        "faiss_index",
        GoogleGenerativeAIEmbeddings(model="models/embedding-001"),
        allow_dangerous_deserialization=True,
    )
24
+
25
def get_conversational_chain():
    """Build a stuff-type question-answering chain backed by Gemini Pro.

    The prompt instructs the model to answer strictly from the retrieved
    context and to admit when the answer is not present.
    """
    template = """
    Answer the question as detailed as possible from the provided context, make sure to provide all the details, if the answer is not in
    provided context just say, "answer is not available in the context", don't provide the wrong answer\n\n
    Context:\n {context}?\n
    Question: \n{question}\n

    Answer:
    """

    llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    qa_prompt = PromptTemplate(template=template, input_variables=["context", "question"])
    return load_qa_chain(llm, chain_type="stuff", prompt=qa_prompt)
41
+
42
def handle_user_query(user_question):
    """Run the full pipeline for one question.

    Retrieves relevant documents from the FAISS store, answers with Gemini,
    expands the answer with Falcon, then translates it to Urdu.

    Returns:
        (detailed_english_response, urdu_translation) tuple of strings.
    """
    # FIX: reuse the vector store that main() caches in session state instead of
    # re-reading the FAISS index from disk on every single query; fall back to
    # loading it so this function still works when called outside main().
    if "vector_store" in st.session_state:
        vector_store = st.session_state.vector_store
    else:
        vector_store = load_vector_store()
    docs = vector_store.similarity_search(user_question)

    chain = get_conversational_chain()

    response = chain(
        {"input_documents": docs, "question": user_question},
        return_only_outputs=True,
    )

    # Agent 1: initial answer grounded in the retrieved context.
    initial_response = response.get("output_text", "No response generated.")
    # Agent 2: expand into a detailed answer with Falcon.
    detailed_response = generate_detailed_response(initial_response, user_question)
    # Agent 3: translate the detailed answer to Urdu.
    urdu_response = generate_urdu_response(detailed_response)

    return detailed_response, urdu_response
62
+
63
def clean_detailed_answer(response_text):
    """Strip a leading "Reply:" label and a trailing "User:" label from LLM output.

    Both labels are matched case-insensitively along with surrounding whitespace;
    text without either label is returned unchanged.
    """
    without_prefix = re.sub(r'^Reply:\s*', '', response_text, flags=re.IGNORECASE)
    return re.sub(r'\s*User:\s*$', '', without_prefix, flags=re.IGNORECASE)
71
+
72
def generate_detailed_response(initial_response, question):
    """Expand the initial Gemini answer into a detailed one via Falcon-180B.

    Streams the completion, echoing each chunk to stdout for debugging, and
    returns the accumulated text with "Reply:"/"User:" labels stripped.
    """
    prompt = f"""
    Provide a detailed and relevant explanation based on the initial response. Avoid any apologies or unnecessary prefaces.

    Initial Response:
    {initial_response}

    Question:
    {question}

    Detailed Answer:
    """

    client = AI71(AI71_API_KEY)
    stream = client.chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
        stream=True,
    )

    pieces = []
    for chunk in stream:
        piece = chunk.choices[0].delta.content
        if piece:
            pieces.append(piece)
            # Mirror each streamed chunk to stdout for debugging.
            print(piece, sep="", end="", flush=True)

    return clean_detailed_answer("".join(pieces))
101
+
102
def generate_urdu_response(english_text):
    """Translate an English answer into Urdu via Falcon-180B (streamed).

    Streams the completion, echoing each chunk to stdout for debugging, and
    returns the accumulated Urdu text.
    """
    prompt = f"""
    Translate the following text into Urdu while preserving the meaning and details.

    English Text:
    {english_text}

    Urdu Translation:
    """

    client = AI71(AI71_API_KEY)
    stream = client.chat.completions.create(
        model="tiiuae/falcon-180b-chat",
        messages=[
            {"role": "system", "content": "You are a translation assistant."},
            {"role": "user", "content": prompt},
        ],
        stream=True,
    )

    parts = []
    for chunk in stream:
        text = chunk.choices[0].delta.content
        if text:
            parts.append(text)
            # Mirror each streamed chunk to stdout for debugging.
            print(text, sep="", end="", flush=True)

    return "".join(parts)
128
+
129
def main():
    """Streamlit entry point: question box, bilingual answer display, and history."""
    st.set_page_config("Chat with PDF")
    st.header("ASK about economic studies")

    # One-time session-state initialisation.
    if 'history' not in st.session_state:
        st.session_state.history = []
    if 'vector_store' not in st.session_state:
        st.session_state.vector_store = load_vector_store()

    user_question = st.text_input("Ask a Question")

    # Only run the pipeline when the button is pressed AND a question was typed.
    if st.button("Generate Response") and user_question:
        with st.spinner('Generating response, please wait...'):
            english_response, urdu_response = handle_user_query(user_question)
            st.markdown("**English Response:**")
            st.write(english_response)
            st.markdown("**Urdu Translation:**")
            st.write(urdu_response)
            # Newest exchange goes first in the history list.
            st.session_state.history.insert(0, {
                'user_question': user_question,
                'english_response': english_response,
                'urdu_response': urdu_response,
            })

    # Render the full response history, most recent first.
    if st.session_state.history:
        st.subheader("***----------------------------Response History----------------------------***")
        for entry in st.session_state.history:
            st.markdown("**User's Question:**")
            st.write(entry['user_question'])
            st.markdown("**English Response:**")
            st.write(entry['english_response'])
            st.markdown("**Urdu Translation:**")
            st.write(entry['urdu_response'])
170
+
171
# Run the app when executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()
faiss_index/index.faiss ADDED
Binary file (154 kB). View file
 
faiss_index/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:505dd53acb38e528b61efa72d989369cf9b782c53622e5e1443422e5f6be0b3a
3
+ size 501070
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ ai71
2
+ streamlit
3
+ python-dotenv
4
+ langchain
5
+ PyPDF2
6
+ faiss-cpu
7
+ langchain_google_genai
8
+ langchain-core
9
+ langchain-community
10
+ google-generativeai