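# Streamlit app: chat with an uploaded PDF using LlamaIndex and the Hugging Face Inference API.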
import base64
import os

import streamlit as st
from dotenv import load_dotenv
from llama_index.core import (
    ChatPromptTemplate,
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceInferenceAPI
# Load environment variables
load_dotenv()
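# Avatar icons for the chat transcript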
icons = {"assistant": "👽", "user": "👦🏻"}
# Configure the LlamaIndex settings (reads HF_TOKEN from the environment)
Settings.llm = HuggingFaceInferenceAPI(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
    context_window=3900,
    token=os.getenv("HF_TOKEN"),
    max_new_tokens=1000,
    generate_kwargs={"temperature": 0.1},
)
Settings.embed_model = HuggingFaceEmbedding(
    model_name="BAAI/bge-small-en-v1.5"
)
# Directories for persistent index storage and uploaded data
PERSIST_DIR = "./db"
DATA_DIR = "data"

# Ensure both directories exist
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(PERSIST_DIR, exist_ok=True)
def displayPDF(file):
    """Render a PDF inline by embedding it as a base64-encoded iframe."""
    with open(file, "rb") as f:
        base64_pdf = base64.b64encode(f.read()).decode("utf-8")
    pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="100%" height="600" type="application/pdf"></iframe>'
    st.markdown(pdf_display, unsafe_allow_html=True)
def data_ingestion():
    """Read every file in DATA_DIR, build a vector index, and persist it to disk."""
    documents = SimpleDirectoryReader(DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(documents)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
def handle_query(query):
    """Load the persisted index and answer a query using the custom QA prompt."""
    storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
    index = load_index_from_storage(storage_context)
    chat_text_qa_msgs = [
        (
            "user",
            """You are a Q&A assistant named CHATTO, created by Pachaiappan, an AI specialist. Your main goal is to provide answers as accurately as possible, based on the instructions and context you have been given. If a question does not match the provided context or is outside the scope of the document, kindly advise the user to ask questions within the context of the document.
            Context:
            {context_str}
            Question:
            {query_str}
            """,
        )
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
    query_engine = index.as_query_engine(text_qa_template=text_qa_template)
    answer = query_engine.query(query)

    if hasattr(answer, "response"):
        return answer.response
    elif isinstance(answer, dict) and "response" in answer:
        return answer["response"]
    else:
        return "Sorry, I couldn't find an answer."
# Streamlit app initialization
st.title("Chat with your PDF 🦜📄")
st.markdown("Built by [Pachaiappan❤️](https://github.com/Mr-Vicky-01)")
st.markdown("chat here👇")
if 'messages' not in st.session_state:
st.session_state.messages = [{'role': 'assistant', "content": 'Hello! Upload a PDF and ask me anything about its content.'}]
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=icons[message["role"]]):
        st.write(message["content"])
with st.sidebar:
    st.title("Menu:")
    uploaded_file = st.file_uploader("Upload your PDF file and click the Submit & Process button")
    if st.button("Submit & Process"):
        if uploaded_file is None:
            st.warning("Please upload a PDF file first.")
        else:
            with st.spinner("Processing..."):
                filepath = os.path.join(DATA_DIR, "saved_pdf.pdf")
                with open(filepath, "wb") as f:
                    f.write(uploaded_file.getbuffer())
                # displayPDF(filepath)  # Display the uploaded PDF
                data_ingestion()  # Re-index every time a new file is uploaded
                st.success("Done")
user_prompt = st.chat_input("Ask me anything about the content of the PDF:")
if user_prompt and uploaded_file:
    st.session_state.messages.append({"role": "user", "content": user_prompt})
    with st.chat_message("user", avatar=icons["user"]):
        st.write(user_prompt)

    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant", avatar=icons["assistant"]):
            response = handle_query(user_prompt)
            st.write(response)
            # Store the response text itself; st.write() returns None
            st.session_state.messages.append({"role": "assistant", "content": response})