# CTP-week3-demo / app.py
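"""Streamlit RAG demo: upload PDFs, extract and chunk their text, embed the
chunks into a local FAISS index with Google Generative AI embeddings, and
answer user questions over the retrieved chunks with Gemini."""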
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
import os
st.set_page_config(page_title="RAG Demo - Evan Perez", layout="wide")
# Read the Gemini API key from the environment rather than hardcoding a secret in source
api_key = os.getenv("GOOGLE_API_KEY")

# Create the directory for the FAISS index; exist_ok avoids crashing on reruns
os.makedirs("faiss_index", exist_ok=True)

import subprocess

# Install the packages listed in requirements.txt at startup
with open("requirements.txt", "r") as f:
    packages = f.read().splitlines()

for package in packages:
    subprocess.call(["pip", "install", package])
def get_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() can return None for pages with no extractable text
            text += page.extract_text() or ""
    return text
def get_text_chunks(text):
    # Split the raw text into overlapping chunks for embedding
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=450, chunk_overlap=50)
    chunks = text_splitter.split_text(text)
    return chunks
def get_vector_store(text_chunks, api_key):
    # Embed the chunks and persist them in a local FAISS index
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=api_key)
    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
    vector_store.save_local("faiss_index")
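# Note: save_local serializes the index (index.faiss / index.pkl) into the
# "faiss_index" directory, which user_input() reloads below.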
def get_conversational_chain():
    prompt_template = """
    Answer the question as thoroughly as possible from the provided context, and include all relevant details.
    If the answer is not in the provided context, just say "answer is not available in the context"; do not give a wrong answer.
    When answering, include every mention of the subject being asked about.\n\n
    Context:\n {context}\n
    Question: \n{question}\n
    Answer:
    """
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.2, google_api_key=api_key)
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    # "stuff" chain: all retrieved documents are packed into a single prompt
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    return chain
def user_input(user_question, api_key):
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001", google_api_key=api_key)
    # Reload the persisted index (recent langchain releases also require
    # allow_dangerous_deserialization=True here)
    new_db = FAISS.load_local("faiss_index", embeddings)
    docs = new_db.similarity_search(user_question)
    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
    st.write("Reply: ", response["output_text"])
def main():
    st.header("RAG-based LLM Application")
    user_question = st.text_input("Ask a Question from the PDF Files", key="user_question")
    if user_question and api_key:  # Ensure both an API key and a question are provided
        user_input(user_question, api_key)
    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True, key="pdf_uploader")
        if st.button("Submit & Process", key="process_button") and api_key:  # Check the API key before processing
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks, api_key)
                st.success("Done")
if __name__ == "__main__":
    main()
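# To run locally (a sketch, assuming the GOOGLE_API_KEY environment variable
# introduced above holds a valid Gemini key):
#   export GOOGLE_API_KEY=<your-key>
#   streamlit run app.py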