from langchain.document_loaders import TextLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer
import faiss
import torch
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
from datetime import datetime
import gradio as gr
import re
from threading import Thread

class DocumentRetrievalAndGeneration:
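    """Load documents from a folder, index their embeddings in a GPU FAISS index,
    and answer queries with three RAG variants (standard, agentic, analytical)
    generated by a 4-bit quantized instruction-tuned LLM."""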
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.tokenizer, self.model = self.initialize_llm(lm_model_id)
        self.retriever_tool = self.create_retriever_tool()

    def load_documents(self, folder_path):
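        """Load every text file in folder_path and split it into 5000-character chunks with 250-character overlap."""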
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print("Number of documents:", len(documents))
        print("Number of splits:", len(all_splits))
        for split in all_splits[:3]:  # preview the first few chunks; safe even if there are fewer than 3
            print(split.page_content)
        return all_splits

    def create_faiss_index(self):
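        """Embed all chunks and build an exact L2 (IndexFlatL2) FAISS index, then move it to GPU 0."""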
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index

    def initialize_llm(self, model_id):
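        """Load the tokenizer and the causal LM with 4-bit NF4 quantization, auto-mapped across available devices."""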
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            quantization_config=quantization_config
        )
        return tokenizer, model

    def generate_response_with_timeout(self, input_ids, max_new_tokens=1000):
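        """Run model.generate on a background thread and collect streamed tokens;
        the streamer's 60-second timeout stops the wait if no new token arrives."""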
        try:
            streamer = TextIteratorStreamer(self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True)
            generate_kwargs = dict(
                input_ids=input_ids,
                max_new_tokens=max_new_tokens,
                do_sample=True,
                top_p=1.0,
                top_k=20,
                temperature=0.8,
                repetition_penalty=1.2,
                eos_token_id=[128001, 128008, 128009],  # Llama 3.1 end-of-sequence/end-of-turn token IDs
                streamer=streamer,
            )
            
            thread = Thread(target=self.model.generate, kwargs=generate_kwargs)
            thread.start()
            
            generated_text = ""
            for new_text in streamer:
                generated_text += new_text
            thread.join()  # the generation thread has finished once the stream is exhausted

            return generated_text
        except Exception as e:
            print(f"Error in generate_response_with_timeout: {str(e)}")
            return "Text generation process encountered an error"

    def create_retriever_tool(self):
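        """Wrap FAISS retrieval in a small tool object exposing run(query) -> concatenated relevant chunks."""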
        class RetrieverTool:
            def __init__(self, parent):
                self.parent = parent

            def run(self, query: str) -> str:
                similarity_threshold = 1  # maximum L2 distance for a chunk to count as relevant
                query_embedding = self.parent.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
                distances, indices = self.parent.gpu_index.search(np.array([query_embedding]), k=3)
                content = ""
                for idx, distance in zip(indices[0], distances[0]):
                    if distance <= similarity_threshold:
                        content += "-" * 50 + "\n"
                        content += self.parent.all_splits[idx].page_content + "\n"
                return content

        return RetrieverTool(self)

    def run_standard_rag(self, query: str, content: str) -> str:
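        """Standard RAG: answer from the pre-retrieved chunks using the model's chat template."""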
        conversation = [
            {"role": "system", "content": "You are a knowledgeable assistant with access to a comprehensive database."},
            {"role": "user", "content": f"""
            I need you to answer my question and provide related information in a specific format.
            I have provided the retrieved document chunks {content}; choose the most suitable chunks for answering the query.
            RETURN ONLY THE SOLUTION without additional comments, sign-offs, retrieved chunks, references to any ticket, or extra phrases. Be direct and to the point.
            IF THERE IS NO RELEVANT ANSWER IN THE RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE".
            DO NOT REFERENCE ANY CHUNKS OR TICKETS; BE ON POINT.

            Here's my question:
            Query: {query}
            Solution:
            """}
        ]
        input_ids = self.tokenizer.apply_chat_template(conversation, return_tensors="pt").to(self.model.device)
        return self.generate_response_with_timeout(input_ids)

    def run_agentic_rag(self, question: str) -> str:
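        """Agentic RAG: call the retriever tool for context, then ask for a concise, direct answer."""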
        retriever_output = self.retriever_tool.run(question)
        
        enhanced_prompt = f"""Using the following information retrieved from the knowledge base:

{retriever_output}

Give a comprehensive answer to the question below.
Respond only to the question asked, be concise and relevant.
If you can't find information, say "No relevant information found."

Question: {question}
Answer:"""

        input_ids = self.tokenizer.encode(enhanced_prompt, return_tensors="pt").to(self.model.device)
        return self.generate_response_with_timeout(input_ids)

    def run_analytical_rag(self, question: str) -> str:
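        """Analytical RAG: same retrieval, but prompt for a step-by-step analysis of the question."""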
        retriever_output = self.retriever_tool.run(question)
        
        enhanced_prompt = f"""Using the following information retrieved from the knowledge base:

{retriever_output}

Provide a detailed, step-by-step analysis of the question below. Break down the problem, consider different aspects, and provide a thorough explanation. If relevant information is missing, state what additional data would be needed for a complete analysis.

Question: {question}
Analysis:
1. """

        input_ids = self.tokenizer.encode(enhanced_prompt, return_tensors="pt").to(self.model.device)
        return self.generate_response_with_timeout(input_ids)

    def query_and_generate_response(self, query):
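        """Retrieve context for the query, run all three RAG variants, and
        return (combined solutions, retrieved context)."""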
        # Retrieval step: embed the query and fetch the 3 nearest chunks from the GPU index
        similarity_threshold = 1  # maximum L2 distance for a chunk to count as relevant
        query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
        distances, indices = self.gpu_index.search(np.array([query_embedding]), k=3)
        print("Distances:", distances, "Indices:", indices)
        content = ""
        for idx, distance in zip(indices[0], distances[0]):
            if distance > similarity_threshold:
                continue
            content += "-" * 50 + "\n"
            content += self.all_splits[idx].page_content + "\n"
            print("CHUNK", idx)
            print("Distance:", distance)
            print(self.all_splits[idx].page_content)
            print("############################")

        # Standard RAG
        start_time = datetime.now()
        standard_response = self.run_standard_rag(query, content)
        elapsed_time = datetime.now() - start_time
        print("Generated standard response:", standard_response)
        print("Time elapsed:", elapsed_time)
        print("Device in use:", self.model.device)

        standard_solution_text = standard_response.strip()
        if "Solution:" in standard_solution_text:
            standard_solution_text = standard_solution_text.split("Solution:", 1)[1].strip()
        standard_solution_text = re.sub(r'^assistant\s*', '', standard_solution_text, flags=re.IGNORECASE)
        standard_solution_text = standard_solution_text.strip()

        # Agentic RAG
        agentic_solution_text = self.run_agentic_rag(query)

        # Analytical RAG
        analytical_solution_text = self.run_analytical_rag(query)

        combined_solution = f"Standard RAG Solution:\n{standard_solution_text}\n\nAgentic RAG Solution:\n{agentic_solution_text}\n\nAnalytical RAG Solution:\n{analytical_solution_text}"
        return combined_solution, content

    def qa_infer_gradio(self, query):
        """Gradio callback: return the combined RAG answers and the retrieved context."""
        solution, content = self.query_and_generate_response(query)
        return solution, content

if __name__ == "__main__":
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
    data_folder = 'sample_embedding_folder2'

    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)

    def launch_interface():
        css_code = """
            .gradio-container {
                background-color: #daccdb;
            }
            button {
                background-color: #927fc7;
                color: black;
                border: 1px solid black;
                padding: 10px;
                margin-right: 10px;
                font-size: 16px;
                font-weight: bold;
            }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?", 
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?", 
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"
        ]

        interface = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            allow_flagging='never',
            examples=EXAMPLES,
            cache_examples=False,
            outputs=[gr.Textbox(label="RESPONSE"), gr.Textbox(label="RETRIEVED CONTEXT")],
            css=css_code,
            title="TI E2E FORUM"
        )

        interface.launch(debug=True)

    launch_interface()