Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -1,63 +1,250 @@
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+from langchain_core.prompts import ChatPromptTemplate
+from langchain.prompts import PromptTemplate
+from langchain_core.output_parsers import StrOutputParser
+
+"""### **Set up the environment**"""
+
+import os
+from langchain_community.llms import Together
+os.environ["TOGETHER_API_KEY"] = "YOUR KEY HERE"
+
+"""### **part 2: Data Collection and Preprocessing**"""
+
+import fitz  # PyMuPDF
+
+def extract_and_split_pdf(pdf_path, split_key="ENDOFTUT"):
+    combined_list = []
+
+    # Open the PDF file
+    document = fitz.open(pdf_path)
+
+    # Extract text from each page
+    all_text = ""
+    for page_num in range(document.page_count):
+        page = document.load_page(page_num)
+        all_text += page.get_text()
+
+    # Split the text by the key
+    combined_list = all_text.split(split_key)
+
+    return combined_list
+
+# Example usage
+pdf_path = "Mech-chunks.pdf"
+combined_list = extract_and_split_pdf(pdf_path)
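As a quick sanity check before embedding, it helps to confirm that the split key actually produced usable chunks; a minimal, illustrative check:

print(f"{len(combined_list)} chunks extracted from {pdf_path}")
print(combined_list[0][:200])  # preview the start of the first chunk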
+
+# Take the Q&A JSON file and append each question/answer pair as a single entry in the list
+import json
+
+# Load the JSON file
+with open('output_data (1).json', 'r') as file:
+    data = json.load(file)
+
+
+for item in data:
+    question = item.get("question", "")
+    answer = item.get("answer", "")
+    combined_list.append(f"Q: {question}?\nA: {answer}\n")
+
+"""### **part 3: save embeddings in a vector store**"""
+
+from langchain.vectorstores import FAISS
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.schema import Document
+
+db = FAISS.from_texts(combined_list,
+                      HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'))
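Recomputing the embeddings on every restart of the Space is slow; the FAISS wrapper can persist the index to disk and reload it later. A minimal sketch, assuming the `save_local`/`load_local` helpers of the installed LangChain FAISS class (the folder name is arbitrary, and `allow_dangerous_deserialization` is only needed by newer langchain-community releases):

db.save_local("faiss_index")
# On a later run, instead of rebuilding:
# db = FAISS.load_local(
#     "faiss_index",
#     HuggingFaceEmbeddings(model_name='BAAI/bge-base-en-v1.5'),
#     allow_dangerous_deserialization=True,
# )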
+
+"""### **part 4: create the retriever**"""
+
+retriever = db.as_retriever(
+    search_type="similarity",
+    search_kwargs={'k': 4}
+)
+
+# Manually test retrieval to verify correct chunk fetching
+query = "what are the tools needed to change Brake Discs on a Car?"
+retrieved_docs = db.similarity_search(query, k=5)
+
+# Print out the content of retrieved documents to verify
+for idx, doc in enumerate(retrieved_docs):
+    print(f"Document {idx + 1}:", doc.page_content)
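The test above queries the vector store directly; the `retriever` defined just before it exposes the same search through the standard retriever interface, which is worth exercising as well since that is the interface most LangChain chains expect. A minimal, illustrative check:

docs = retriever.get_relevant_documents(query)  # top k=4 chunks per the retriever config
for doc in docs:
    print(doc.page_content[:120])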
+
+"""### **part 5: load the llama model**"""
+
+llm = Together(
+    model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+    max_tokens=512,
+    temperature=0.2,
+    # top_p=0.7,
+    # top_k=50,
+    repetition_penalty=1.1,
+)
+
+"""### **part 7: Load the Llama Guard model**"""
+
+Llama_guard = Together(
+    model="meta-llama/Meta-Llama-Guard-3-8B",
+    max_tokens=10,
+    temperature=0.2,
+    # top_p=0.7,
+    # top_k=50,
+    repetition_penalty=1.1,
+)
+
+"""### **part 8: Moderating Chat Messages with a Template and Guard Model**"""
+
+guard_temp_ans = """
+<|system|>
+Task: Check if there is unsafe content in conversations according to our safety policy with the below categories.
+
+<BEGIN UNSAFE CONTENT CATEGORIES>
+1: Violence and Hate.
+Should not
+- Help people plan or engage in violence.
+- Encourage people to engage in violence.
+- Express hateful, derogatory, or demeaning sentiments against people based on sensitive personal characteristics like their race, color, religion, national origin, sexual orientation, gender, gender identity, or disability.
+- Encourage discrimination against people based on those characteristics and should not use slurs or create insults based on those characteristics.
+- Provide information that can be used to promote or engage in violence, hate, or discrimination.
+- Provide guidance or instructions on how to engage in violent or hateful behavior, except if related to 3D and Blender creation for artistic purposes.
+
+2: Sexual Content.
+Should not
+- Engage in sexually explicit (i.e., erotic) conversations and should not encourage people to engage in sexual activities.
+- Create or provide explicit depictions of sexual acts or nudity.
+- Provide information that could be used to create sexually explicit content.
+
+3: Criminal Planning.
+Should not
+- Help people plan or engage in criminal activities like theft, kidnapping, or financial crimes, especially when the person indicates that they may act on the information provided to them.
+- Encourage people to engage in criminal activities or condone criminal behavior.
+- Provide detailed instructions or information that could be used to plan or execute criminal activities, except if related to 3D and Blender creation for artistic purposes.
+
+4: Guns and Illegal Weapons.
+Should not
+- Assist in the planning or execution of firearm or illegal weapons crimes, especially when the person indicates that they may act on the information provided to them.
+- Encourage people to engage in firearm or illegal weapons crimes or condone such behavior.
+- Provide information that could be used to create or use illegal weapons, except if related to 3D and Blender creation for artistic purposes.
+
+5: Regulated or Controlled Substances.
+Should not
+- Encourage or assist people in the trafficking, consumption, or use of regulated or controlled substances, including but not limited to illegal drugs, tobacco, alcohol, and cannabis.
+- Assist or encourage people to create such substances when it is illegal to do so.
+- Provide information that could be used to create or use regulated or controlled substances, except if related to 3D and Blender creation for artistic purposes.
+
+6: Self-Harm.
+Should not
+- Encourage people to harm themselves.
+- Romanticize or condone self-harm or suicide.
+- Provide information regarding the methods of suicide or self-harm.
+- Provide any information or advice, other than information regarding appropriate health resources, to people who express or imply that they wish to harm themselves.
+- Provide information that could be used to promote or engage in self-harm, except if related to 3D and Blender creation for artistic purposes.
+
+
+<END UNSAFE CONTENT CATEGORIES>
+
+<BEGIN CONVERSATION>
+{answer}
+<END CONVERSATION>
+
+Provide your safety assessment for {answer} in the above conversation:
+- First line must read 'safe' or 'unsafe'.
+- If unsafe, do not include a second line.
+"""
+
+# Removed extra space before the line below to fix the indentation error
+prompt_ans_guard = ChatPromptTemplate.from_template(guard_temp_ans)
+
+guard_chain = (
+    prompt_ans_guard
+    | Llama_guard
+)
+
+
def moderate(chat):
|
166 |
+
input_ids = Llama_guard.apply_chat_template(chat, return_tensors="pt").to(device)
|
167 |
+
output = model.generate(input_ids=input_ids, max_new_tokens=100, pad_token_id=0)
|
168 |
+
prompt_len = input_ids.shape[-1]
|
169 |
+
return Llama_guard.decode(output[0][prompt_len:], skip_special_tokens=True)
|
170 |
+
|
171 |
+
"""### **part 9: Define the prompt template and LLMChain**"""
|
172 |
+
|
173 |
+
# Define the prompt template
|
174 |
+
prompt_template = PromptTemplate(
|
175 |
+
input_variables=["context", "question", "history"],
|
176 |
+
template=("""
|
177 |
+
You are a mechanic assistant and your name is MechBot, these human will ask you questions about Cars,
|
178 |
+
use Use following piece of context and chat history to answer the question.
|
179 |
+
If you don't know the answer, just say you don't know.
|
180 |
+
If the question is start with how to, answer with steps and mention the tools if you know it.
|
181 |
+
|
182 |
+
Chat History: ({history})
|
183 |
+
|
184 |
+
Context: ({context})
|
185 |
+
|
186 |
+
Question: {question}
|
187 |
+
|
188 |
+
Answer:
|
189 |
+
"""
|
190 |
+
|
191 |
+
)
|
192 |
+
)
|
193 |
+
|
194 |
+
llm_chain = prompt_template | llm | StrOutputParser()
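For reference, the chain takes the three prompt variables as a single dict; an illustrative call with placeholder strings:

reply = llm_chain.invoke({
    "context": "Brake discs are usually replaced in pairs together with new pads.",
    "history": "",
    "question": "How do I know when my brake discs are worn out?",
})
print(reply)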
+
+"""### **part 12: Question Answering Function with Content Moderation**
+
+This function, **answer_question**, answers user questions while ensuring content appropriateness through moderation checks.
+
+**Moderate User Question:** Checks if the question is safe. If not, it returns an apologetic message.
+
+**Generate AI Response:** Uses the RAG chain to generate an answer if the question is safe.
+
+**Update Chat History:** Adds the question and answer to the running chat history that is passed back as context on later turns.
+"""
+
+def answer_question(question, gh):
+    # gh is the chat history supplied by gr.ChatInterface; a separate global string
+    # `history` is used instead so the prompt keeps its own running transcript.
+    global counter
+    global history
+    global reter
+    if "unsafe" in guard_chain.invoke({"answer": question}):
+        return "I'm sorry, but I can't respond to that question as it may contain inappropriate content."
+    reter = ""
+    retrieved_docs = db.similarity_search(question, k=2)  # Consider reducing 'k' if context is too large
+
+    for doc in retrieved_docs:
+        reter += doc.page_content + "\n"
+
+    # Truncate history if it's too long
+    if len(history) > 3000:  # Adjust this value as needed
+        history = history[-2000:]
+
+    formatted_prompt = prompt_template.format(context=reter, history=history, question=question)
+    print("Formatted Prompt:")
+    print(formatted_prompt)
+
+    answer = llm_chain.invoke({"context": reter, "history": history, "question": question})
+    history += "\n" + "user question: " + question + "\n" + "AI answer: " + answer
+    # print(reter)
+    counter += 1
+    return answer
+
+"""### **part 13: interface**
+
+"""
+
+import gradio as gr
+history = ""
+counter = 1
+# Create the Chat interface
+iface = gr.ChatInterface(
+    answer_question,  # Use the improved answer_question function
+    title="Mech-bot: Your Car Mechanic Assistant",
+    description="Ask any car mechanic-related questions, and Mech-bot will try its best to assist you.",
+    submit_btn="Ask",
+    clear_btn="Clear Chat"
+)
+
+# Launch the Gradio interface
+iface.launch(debug=True)
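The imports above pull in several packages beyond gradio. A plausible requirements.txt for this Space is sketched below; the package names are the usual PyPI ones and are an assumption, not taken from the repository:

langchain
langchain-core
langchain-community
together
faiss-cpu
sentence-transformers
PyMuPDF
gradio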