import os
import torch
from torch import cuda, bfloat16
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig, StoppingCriteria, StoppingCriteriaList
from langchain.llms import HuggingFacePipeline
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
import gradio as gr
from langchain.embeddings import HuggingFaceEmbeddings


# Load the Hugging Face token from environment
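# The token is only required for gated checkpoints (e.g. the Meta-Llama models referenced below); public models load without it.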
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# Define stopping criteria
class StopOnTokens(StoppingCriteria):
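    """Stop generation once the tail of the generated ids matches any of the stop sequences in stop_token_ids (defined below)."""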
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        for stop_ids in stop_token_ids:
            if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
                return True
        return False

# Load the model and tokenizer (the Mistral-7B-Instruct checkpoint is active; earlier LLaMA options are kept for reference)
# model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'
# model_id = "meta-llama/Llama-2-7b-chat-hf"
model_id = "mistralai/Mistral-7B-Instruct-v0.2"
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

# Set 4-bit NF4 quantization (bitsandbytes) so the model fits in less GPU memory
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type='nf4',
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=bfloat16
)

tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", token=HF_TOKEN, quantization_config=bnb_config)

# Build the stop-token id tensors used by StopOnTokens above
stop_list = ['\nHuman:', '\n```\n']
stop_token_ids = [tokenizer(x)['input_ids'] for x in stop_list]
stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
stopping_criteria = StoppingCriteriaList([StopOnTokens()])

# Create text generation pipeline
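# Note: temperature only takes effect when sampling is enabled (do_sample=True); with the default
# greedy decoding, transformers ignores it.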
generate_text = pipeline(
    model=model,
    tokenizer=tokenizer,
    return_full_text=True,
    task='text-generation',
    # stopping_criteria=stopping_criteria,
    temperature=0.1,
    max_new_tokens=2048,
    # repetition_penalty=1.1
)

llm = HuggingFacePipeline(pipeline=generate_text)

# Load the stored FAISS index
try:
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cuda"})
    vectorstore = FAISS.load_local('faiss_index', embeddings)
    print("Loaded FAISS index successfully")
except Exception as e:
    print("Failed to load the FAISS index 'faiss_index'. Make sure FAISS is installed and the index has been built.")
    raise e
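
# The app expects a prebuilt 'faiss_index' directory. A minimal, hypothetical sketch of how such an index
# could be built with the same embedding model (the document path and loader below are assumptions):
#
#   from langchain.document_loaders import TextLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#   docs = TextLoader("data/tickets.txt").load()  # assumed source file
#   chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
#   embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
#   FAISS.from_documents(chunks, embeddings).save_local('faiss_index')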

# Set up the Conversational Retrieval Chain
chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)
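# With return_source_documents=True, the chain's output dict carries both 'answer' and 'source_documents'.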

chat_history = []

def format_prompt(query):
    prompt = f"""
    You are a knowledgeable assistant with access to a comprehensive database. 
    I need you to answer my question and provide related information in a specific format.
    Here's what I need:
    A brief, general response to my question, based on the retrieved related answers.
    End with a brief final answer, without additional comments, sign-offs, or extra phrases. Be direct and to the point.

    A JSON-formatted output covering ALL source documents, containing:
       - "question": The ticketName
       - "answer": The Responses
    Here's my question:
    {query}
    """
       
    #    - "related_questions": A list of related questions and their answers, each as a dictionary with the keys. Consider all source documents:
    #      - "question": The related question.
    #      - "answer": The related answer.
    
    

    # Example 1:
    # {{
    #     "question": "How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM",
    #     "answer": "To use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM, you need to modify the configuration file of the NDK application. Specifically, change the processor reference from 'A15_0' to 'IPU1_0'.",
    #     "related_questions": [
    #         {{
    #             "question": "Can you provide MLBP documentation on TDA2?",
    #             "answer": "MLB is documented for DRA devices in the TRM book, chapter 24.12."
    #         }},
    #         {{
    #             "question": "Hi, could you share me the TDA2x documents about Security(SPRUHS7) and Cryptographic(SPRUHS8) addendums?",
    #             "answer": "Most of TDA2 documents are on ti.com under the product folder."
    #         }},
    #         {{
    #             "question": "Is any one can provide us a way to access CDDS for nessary docs?",
    #             "answer": "Which document are you looking for?"
    #         }},
    #         {{
    #             "question": "What can you tell me about the TDA2 and TDA3 processors? Can they / do they run Linux?",
    #             "answer": "We have moved your post to the appropriate forum."
    #         }}
    #     ]
    # }}

    # Final Answer: To use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM, you need to modify the configuration file of the NDK application. Specifically, change the processor reference from 'A15_0' to 'IPU1_0'.
    
    # Example 2:
    # {{
    #     "question": "Can BQ25896 support I2C interface?",
    #     "answer": "Yes, the BQ25896 charger supports the I2C interface for communication.",
    #     "related_questions": [
    #         {{
    #             "question": "What are the main features of BQ25896?",
    #             "answer": "The BQ25896 features include high-efficiency, fast charging capability, and a wide input voltage range."
    #         }},
    #         {{
    #             "question": "How to configure the BQ25896 for USB charging?",
    #             "answer": "To configure the BQ25896 for USB charging, set the input current limit and the charging current via I2C registers."
    #         }}
    #     ]
    # }}

    # Final Answer: Yes, the BQ25896 charger supports the I2C interface for communication.
    
    # """

    
    return prompt


def qa_infer(query):
    formatted_prompt = format_prompt(query)
    result = chain({"question": formatted_prompt, "chat_history": chat_history})
    for doc in result['source_documents']:
        print("-"*50)
        print("Retrieved Document:", doc.page_content)
    print("#"*100)
    print(result['answer'])
    return result['answer']

EXAMPLES = ["How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM", 
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?", 
            "Master core in TDA2XX is a15 and in TDA3XX it is m4,so we have to shift all modules that are being used by a15 in TDA2XX to m4 in TDA3xx."]

demo = gr.Interface(fn=qa_infer, inputs="text", allow_flagging='never', examples=EXAMPLES, cache_examples=False, outputs="text")
demo.launch()

# import os
# import torch
# from torch import cuda, bfloat16
# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, BitsAndBytesConfig, StoppingCriteria, StoppingCriteriaList
# from langchain.llms import HuggingFacePipeline
# from langchain.vectorstores import FAISS
# from langchain.chains import ConversationalRetrievalChain
# import gradio as gr
# from langchain.embeddings import HuggingFaceEmbeddings

# # Load the Hugging Face token from environment
# HF_TOKEN = os.environ.get("HF_TOKEN", None)

# # Define stopping criteria
# class StopOnTokens(StoppingCriteria):
#     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
#         for stop_ids in stop_token_ids:
#             if torch.eq(input_ids[0][-len(stop_ids):], stop_ids).all():
#                 return True
#         return False

# # Load the LLaMA model and tokenizer
# model_id = 'meta-llama/Meta-Llama-3-8B-Instruct'
# device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'

# # Set quantization configuration
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type='nf4',
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype=bfloat16
# )

# tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
# model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", token=HF_TOKEN, quantization_config=bnb_config)

# # Define stopping criteria
# stop_list = ['\nHuman:', '\n```\n']
# stop_token_ids = [tokenizer(x)['input_ids'] for x in stop_list]
# stop_token_ids = [torch.LongTensor(x).to(device) for x in stop_token_ids]
# stopping_criteria = StoppingCriteriaList([StopOnTokens()])

# # Create text generation pipeline
# generate_text = pipeline(
#     model=model,
#     tokenizer=tokenizer,
#     return_full_text=True,
#     task='text-generation',
#     stopping_criteria=stopping_criteria,
#     temperature=0.1,
#     max_new_tokens=512,
#     repetition_penalty=1.1
# )

# llm = HuggingFacePipeline(pipeline=generate_text)

# # Load the stored FAISS index
# try:
#     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2", model_kwargs={"device": "cuda"})
#     vectorstore = FAISS.load_local('faiss_index', embeddings)
#     print("Loaded embedding successfully")
# except ImportError as e:
#     print("FAISS could not be imported. Make sure FAISS is installed correctly.")
#     raise e

# # Set up the Conversational Retrieval Chain
# chain = ConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(), return_source_documents=True)

# chat_history = []

# def format_prompt(query):
#     prompt = f"""
#     You are a knowledgeable assistant with access to a comprehensive database. 
#     I need you to answer my question and provide related information in a specific format.
#     Here's what I need:
#     1. A brief, general response to my question based on related answers retrieved.
#     2. A JSON-formatted output containing:
#        - "question": The original question.
#        - "answer": The detailed answer.
#        - "related_questions": A list of related questions and their answers, each as a dictionary with the keys:
#          - "question": The related question.
#          - "answer": The related answer.
#     Here's my question:
#     {query}
#     Include a brief final answer without additional comments, sign-offs, or extra phrases. Be direct and to the point.
#     """
#     return prompt

# def qa_infer(query):
#     formatted_prompt = format_prompt(query)
#     result = chain({"question": formatted_prompt, "chat_history": chat_history})
#     return result['answer']

# EXAMPLES = ["How to use IPU1_0 instead of A15_0 to process NDK in TDA2x-EVM", 
#             "Can BQ25896 support I2C interface?", 
#             "Does TDA2 vout support bt656 8-bit mode?"]

# demo = gr.Interface(fn=qa_infer, inputs="text", allow_flagging='never', examples=EXAMPLES, cache_examples=False, outputs="text")
# demo.launch()