from langchain_community.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import gradio as gr
from transformers.agents import Tool, HfEngine, ReactJsonAgent
from huggingface_hub import InferenceClient
import logging
import torch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
try:
from langchain_community.vectorstores import FAISS
except ImportError:
logger.error("Failed to import FAISS. Make sure it's installed correctly.")
logger.info("You can try: pip install faiss-cpu --no-cache")
FAISS = None
class DocumentRetrievalAndGeneration:
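    """RAG pipeline over a local document folder: FAISS similarity search plus a
    transformers ReAct agent (agentic RAG) and a single-shot baseline (standard RAG),
    exposed through a Gradio interface."""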
def __init__(self, embedding_model_name, lm_model_id, data_folder):
self.all_splits = self.load_documents(data_folder)
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
if FAISS is not None:
self.vectordb = self.create_faiss_index()
else:
logger.warning("FAISS is not available. Vector search functionality will be limited.")
self.vectordb = None
self.tokenizer, self.model = self.initialize_llm(lm_model_id)
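        # The locally quantized model loaded above is not used for generation below;
        # answers come from the HF Inference API (HfEngine / InferenceClient).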
self.retriever_tool = self.create_retriever_tool()
self.agent = self.create_agent()
def load_documents(self, folder_path):
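        # Load every text file in the folder and split it into small overlapping chunks
        # (200 characters with 20-character overlap) for embedding.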
loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=20)
all_splits = text_splitter.split_documents(documents)
logger.info(f'Loaded {len(documents)} documents')
logger.info(f"Split into {len(all_splits)} chunks")
return all_splits
def create_faiss_index(self):
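        # Embed all chunks and build an in-memory FAISS index for similarity search.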
if FAISS is None:
logger.error("FAISS is not available. Cannot create index.")
return None
return FAISS.from_documents(self.all_splits, self.embeddings)
def initialize_llm(self, model_id):
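        # Load the model with 4-bit NF4 quantization (double quantization, bfloat16
        # compute) to keep GPU memory usage low.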
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
model_id,
torch_dtype=torch.bfloat16,
device_map="auto",
quantization_config=quantization_config
)
return tokenizer, model
def create_retriever_tool(self):
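        # Wrap FAISS similarity search as a transformers-agents Tool so the ReAct
        # agent can call it by name ("retriever").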
class RetrieverTool(Tool):
name = "retriever"
description = "Retrieves documents from the knowledge base that are semantically similar to the input query."
inputs = {
"query": {
"type": "text",
"description": "The query to perform. Use affirmative form rather than a question.",
}
}
output_type = "text"
def __init__(self, vectordb, **kwargs):
super().__init__(**kwargs)
self.vectordb = vectordb
def forward(self, query: str) -> str:
if self.vectordb is None:
return "Vector database is not available. Cannot perform retrieval."
docs = self.vectordb.similarity_search(query, k=3)
return "\nRetrieved documents:\n" + "".join(
[f"===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs)]
)
return RetrieverTool(self.vectordb)
def create_agent(self):
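        # ReAct-style JSON agent driven by Llama 3.1 8B Instruct via the HF Inference API.
        # (In newer transformers releases the engine class may be named HfApiEngine.)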
llm_engine = HfEngine("meta-llama/Meta-Llama-3.1-8B-Instruct")
return ReactJsonAgent(tools=[self.retriever_tool], llm_engine=llm_engine, max_iterations=4, verbose=2)
def run_agentic_rag(self, question: str) -> str:
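        # Agentic RAG: the agent decides when and how often to call the retriever tool.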
enhanced_question = f"""Using the information in your knowledge base, accessible with the 'retriever' tool,
give a comprehensive answer to the question below.
Respond only to the question asked, be concise and relevant.
If you can't find information, try calling your retriever again with different arguments.
Make sure to cover the question completely by calling the retriever tool several times with semantically different queries.
Your queries should be in affirmative form, not questions.
Question:
{question}"""
return self.agent.run(enhanced_question)
def run_standard_rag(self, question: str) -> str:
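        # Standard RAG baseline: one retrieval pass, then a single chat completion.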
context = self.retriever_tool(query=question)
prompt = f"""Given the question and supporting documents below, give a comprehensive answer to the question.
Respond only to the question asked, be concise and relevant.
Provide the number of the source document when relevant.
Question:
{question}
{context}
"""
messages = [{"role": "user", "content": prompt}]
reader_llm = InferenceClient("meta-llama/Meta-Llama-3.1-8B-Instruct")
return reader_llm.chat_completion(messages).choices[0].message.content
def query_and_generate_response(self, query):
agentic_answer = self.run_agentic_rag(query)
standard_answer = self.run_standard_rag(query)
combined_answer = f"Agentic RAG Answer:\n{agentic_answer}\n\nStandard RAG Answer:\n{standard_answer}"
        return combined_answer, ""  # Second value feeds the "RELATED QUERIES" output box, which is unused in this implementation
def qa_infer_gradio(self, query):
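        # Gradio callback: the returned (answer, related) tuple is unpacked into the
        # two output textboxes.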
response = self.query_and_generate_response(query)
return response
def save_index(self, path):
if self.vectordb is not None:
self.vectordb.save_local(path)
else:
logger.warning("Vector database is not available. Cannot save index.")
def load_index(self, path):
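        # Note: recent langchain_community releases may require passing
        # allow_dangerous_deserialization=True to FAISS.load_local.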
if FAISS is not None:
self.vectordb = FAISS.load_local(path, self.embeddings)
else:
logger.warning("FAISS is not available. Cannot load index.")
if __name__ == "__main__":
embedding_model_name = 'thenlper/gte-small'
lm_model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
data_folder = 'sample_embedding_folder2'
try:
doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)
# Save the index for future use
doc_retrieval_gen.save_index("faiss_index")
def launch_interface():
css_code = """
.gradio-container {
background-color: #daccdb;
}
button {
background-color: #927fc7;
color: black;
border: 1px solid black;
padding: 10px;
margin-right: 10px;
font-size: 16px;
font-weight: bold;
}
"""
EXAMPLES = [
"On which devices can the VIP and CSI2 modules operate simultaneously?",
"I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
"Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"
]
interface = gr.Interface(
fn=doc_retrieval_gen.qa_infer_gradio,
inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
allow_flagging='never',
examples=EXAMPLES,
cache_examples=False,
outputs=[gr.Textbox(label="RESPONSE"), gr.Textbox(label="RELATED QUERIES")],
css=css_code,
title="TI E2E FORUM Multi-Agent RAG"
)
interface.launch(debug=True)
launch_interface()
except Exception as e:
logger.error(f"An error occurred: {str(e)}")
logger.info("Please check your environment setup and try again.")