Rohil Bansal committed · Commit d8143c9
Parent(s): 4adc02d

working...
Files changed:
- app.py +4 -2
- graphs/workflow_graph.jpg +2 -2
- src/__pycache__/buildgraph.cpython-312.pyc +0 -0
- src/__pycache__/graph.cpython-312.pyc +0 -0
- src/__pycache__/index.cpython-312.pyc +0 -0
- src/__pycache__/llm.cpython-312.pyc +0 -0
- src/__pycache__/websearch.cpython-312.pyc +0 -0
- src/buildgraph.py +62 -21
- src/graph.py +33 -5
- src/llm.py +5 -2
- vectordb/65ba2328-ffa1-497d-b641-c6b84db7f0e1/length.bin +1 -1
app.py
CHANGED
@@ -48,7 +48,7 @@ if "messages" not in st.session_state:
 if "thread_id" not in st.session_state:
     st.session_state.thread_id = "streamlit_thread"
 
-config = {"configurable": {"thread_id": st.session_state.thread_id}}
+config = {"recursion_limit": 15, "configurable": {"thread_id": st.session_state.thread_id}}
 
 # Display chat messages from history on app rerun
 for message in st.session_state.messages:
@@ -71,9 +71,11 @@ if prompt := st.chat_input("What is your question?"):
     full_response = "⚠️ **_Note: Information provided may be inaccurate._** \n\n\n"
     for char in response_content:
         full_response += char
+
         time.sleep(0.03)
-        message_placeholder.markdown(full_response + "
+        message_placeholder.markdown(full_response + "|")
     message_placeholder.markdown(full_response)
+    print(full_response)
 
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": full_response})
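The substantive change above is the new "recursion_limit" key in the LangGraph config. Below is a minimal sketch, not from this repo, of what that key does: it caps the number of graph super-steps per invocation and raises GraphRecursionError once the cap is hit. The two-node loop is purely illustrative.

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END
from langgraph.errors import GraphRecursionError

class State(TypedDict):
    count: int

def bump(state: State) -> State:
    # Each node execution consumes one step against the recursion limit.
    return {"count": state["count"] + 1}

def echo(state: State) -> State:
    return {"count": state["count"]}

g = StateGraph(State)
g.add_node("bump", bump)
g.add_node("echo", echo)
g.add_edge(START, "bump")
g.add_edge("bump", "echo")
# Conditional edge that always loops back, so the limit is what stops us.
g.add_conditional_edges("echo", lambda s: "loop", {"loop": "bump", "done": END})
app = g.compile()

try:
    # Same config shape app.py now uses; the limit bounds steps per invoke.
    app.invoke({"count": 0}, {"recursion_limit": 15})
except GraphRecursionError:
    print("hit the 15-step recursion limit")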
graphs/workflow_graph.jpg
CHANGED
[Image updated; old and new versions are stored via Git LFS.]
src/__pycache__/buildgraph.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/buildgraph.cpython-312.pyc and b/src/__pycache__/buildgraph.cpython-312.pyc differ

src/__pycache__/graph.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/graph.cpython-312.pyc and b/src/__pycache__/graph.cpython-312.pyc differ

src/__pycache__/index.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/index.cpython-312.pyc and b/src/__pycache__/index.cpython-312.pyc differ

src/__pycache__/llm.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/llm.cpython-312.pyc and b/src/__pycache__/llm.cpython-312.pyc differ

src/__pycache__/websearch.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/websearch.cpython-312.pyc and b/src/__pycache__/websearch.cpython-312.pyc differ
src/buildgraph.py
CHANGED
@@ -2,6 +2,7 @@ from src.graph import *
 from langgraph.graph import END, StateGraph, START
 import sys
 from langgraph.checkpoint.memory import MemorySaver
+from langgraph.errors import GraphRecursionError
 
 memory = MemorySaver()
 
@@ -20,6 +21,8 @@ try:
     workflow.add_node("grade_documents", grade_documents)
     workflow.add_node("generate", generate)
     workflow.add_node("transform_query", transform_query)
+    workflow.add_node("grade_generation", grade_generation_v_documents_and_question)
+
     print("Nodes added successfully.")
 
     print("Building graph edges...")
@@ -36,36 +39,60 @@ try:
 
     workflow.add_edge("greeting", END)
     workflow.add_edge("off_topic", END)
-
-    workflow.add_edge("retrieve", "grade_documents")
+
     workflow.add_conditional_edges(
-        "
+        "route_question",
+        lambda x: x["route_question"],
         {
-            "
-            "
-        }
+            "web_search": "web_search",
+            "vectorstore": "retrieve",
+        }
     )
-
+
+    workflow.add_conditional_edges(
+        "retrieve",
+        check_recursion_limit,
+        {
+            "web_search": "web_search",
+            "continue": "grade_documents",
+        }
+    )
+
     workflow.add_conditional_edges(
         "generate",
-
+        check_recursion_limit,
+        {
+            "web_search": "web_search",
+            "continue": "grade_generation",
+        }
+    )
+
+    workflow.add_conditional_edges(
+        "grade_generation",
+        lambda x: x["grade_generation"],
         {
             "not supported": "generate",
            "useful": END,
            "not useful": "transform_query",
-        }
+        }
     )
+
+    workflow.add_edge("transform_query", "route_question")
+
     workflow.add_conditional_edges(
-        "
+        "grade_documents",
+        decide_to_generate,
         {
-            "
-            "
-        }
+            "transform_query": "transform_query",
+            "generate": "generate",
+        },
    )
+
+    workflow.add_edge("web_search", "generate")
+
     print("Graph edges built successfully.")
 
+
     print("Compiling the workflow...")
     app = workflow.compile(checkpointer=memory)
     print("Workflow compiled successfully.")
@@ -102,6 +129,9 @@ try:
     print(f"Error handling graph visualization: {e}")
     print("Graph visualization skipped.")
 
+except GraphRecursionError:
+    print("Graph recursion limit reached during compilation.")
+    # Handle the error as needed
 except Exception as e:
     print(f"Error building the graph: {e}")
     sys.exit(1)
@@ -120,12 +150,23 @@ def run_workflow(question, config):
     }
 
     final_output = None
-
-
-
-
-
-
+    use_web_search = False
+
+    try:
+        for output in app.stream(input_state, config):
+            for key, value in output.items():
+                print(f"Node '{key}'")
+                if key in ["grade_generation", "off_topic", "greeting", "web_search"]:
+                    final_output = value
+    except GraphRecursionError:
+        print("Graph recursion limit reached, switching to web search")
+        use_web_search = True
+
+    if use_web_search:
+        # Force the use of web_search
+        web_search_result = web_search(input_state)
+        generate_result = generate(web_search_result)
+        final_output = generate_result
 
     if final_output is None:
         return {"generation": "I'm sorry, I couldn't generate a response. Could you please rephrase your question?"}
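For reference, the streaming loop in run_workflow relies on app.stream yielding one mapping per executed node, keyed by the node's name, which is what the `for key, value in output.items()` iteration unpacks. A hedged usage sketch against this repo's own module; the question, thread id, and input keys shown are illustrative, and it assumes the node-keyed "updates" stream shape that the commit's own loop expects.

# Usage sketch only; `app` is the compiled workflow defined in src/buildgraph.py.
from src.buildgraph import app

config = {"recursion_limit": 15, "configurable": {"thread_id": "demo-thread"}}
input_state = {"question": "Which IPC section covers theft?", "chat_history": []}

for step in app.stream(input_state, config):
    for node_name, update in step.items():
        # e.g. "retrieve" -> ['documents', 'question']
        print(node_name, sorted(update))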
src/graph.py
CHANGED
@@ -3,6 +3,7 @@ from typing_extensions import TypedDict
 from src.websearch import *
 from src.llm import *
 from langchain.schema import Document, AIMessage
+from langgraph.errors import GraphRecursionError
 
 class GraphState(TypedDict):
     question: str
@@ -65,6 +66,8 @@ def generate(state):
 
     generation_prompt = f"""
     As LegalAlly, an AI assistant specializing in the Indian Penal Code, provide a helpful and informative response to the following question. Use the given context and chat history for reference.
+    Responses are concise and answer user's queries directly. They are not verbose. The answer feels natural and not robotic.
+    Make sure the answer is grounded in the documents and is not hallucination.
 
     Context:
     {context}
@@ -79,7 +82,7 @@ def generate(state):
 
     generation = llm.invoke(generation_prompt)
     generation = generation.content if hasattr(generation, 'content') else str(generation)
-
+
     return {
         "documents": documents,
         "question": question,
@@ -185,6 +188,7 @@ def grade_generation_v_documents_and_question(state):
     question = state["question"]
     documents = state["documents"]
     generation = state["generation"]
+    chat_history = state.get("chat_history", [])
 
     score = hallucination_grader.invoke(
         {"documents": documents, "generation": generation}
@@ -197,14 +201,32 @@ def grade_generation_v_documents_and_question(state):
         grade = score.binary_score
         if grade == "yes":
             print("---DECISION: GENERATION ADDRESSES QUESTION---")
-            return
+            return {
+                "grade_generation": "useful",
+                "question": question,
+                "generation": generation,
+                "documents": documents,
+                "chat_history": chat_history
+            }
         else:
             print("---DECISION: GENERATION DOES NOT ADDRESS QUESTION---")
-            return
+            return {
+                "grade_generation": "not useful",
+                "question": question,
+                "generation": generation,
+                "documents": documents,
+                "chat_history": chat_history
+            }
     else:
         print("---DECISION: GENERATION IS NOT GROUNDED IN DOCUMENTS, RE-TRY---")
-        return
-
+        return {
+            "grade_generation": "not supported",
+            "question": question,
+            "generation": generation,
+            "documents": documents,
+            "chat_history": chat_history
+        }
+
 def greeting(state):
     print("---GREETING---")
     return {
@@ -216,3 +238,9 @@ def off_topic(state):
     return {
         "generation": "I apologize, but I specialize in matters related to the Indian Penal Code. Could you please ask a question about Indian law or legal matters?"
     }
+
+# conditional edges for recursion limit
+def check_recursion_limit(state):
+    # LangGraph will automatically raise GraphRecursionError if the limit is reached
+    # We don't need to check for it explicitly
+    return "continue"
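The pattern worth noting in this file: grade_generation_v_documents_and_question now writes its verdict into state under the "grade_generation" key, and buildgraph.py's conditional edge reads it back with `lambda x: x["grade_generation"]`. A self-contained sketch of that store-then-dispatch pattern, with illustrative node and state names; the hard-coded verdict stands in for a real grader.

from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END

class S(TypedDict):
    grade_generation: str

def grade(state: S) -> S:
    # Stand-in for the real grader: the verdict is written into state.
    return {"grade_generation": "useful"}

g = StateGraph(S)
g.add_node("grader", grade)  # named "grader" to avoid clashing with the state key
g.add_edge(START, "grader")
g.add_conditional_edges(
    "grader",
    lambda x: x["grade_generation"],  # the selector reads the stored verdict
    {"useful": END, "not useful": END, "not supported": END},
)
print(g.compile().invoke({"grade_generation": ""}))
# -> {'grade_generation': 'useful'}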
src/llm.py
CHANGED
@@ -27,8 +27,11 @@ structured_llm_router = llm.with_structured_output(RouteQuery)
 # Prompt
 system = """You are an expert at routing a user question to a vectorstore or web search.
 The vectorstore contains documents related to Indian Penal Code and The Indian Constitution.
-It can answer
-Use
+It can answer questions related to Indian Law, IPC and the Constitution.
+Use vectorstore if the question is a legal query within the scope of IPC, Indian Law and the Indian Constitution.
+Use web-search if the question is a legal query outside the scope of IPC, Indian Law and the Indian Constitution.
+Use web-search and your own knowledge if the question requires general legal help.
+Use web-search if the questions is a legal query that requires latest information."""
 route_prompt = ChatPromptTemplate.from_messages(
     [
         ("system", system),
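The hunk header shows this prompt feeding `llm.with_structured_output(RouteQuery)`. A self-contained sketch of that routing pattern; the RouteQuery schema, model name, and abridged system string below are assumptions rather than the repo's exact code.

from typing import Literal

from pydantic import BaseModel, Field
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

class RouteQuery(BaseModel):
    """Route a user query to the most relevant datasource."""
    datasource: Literal["vectorstore", "web_search"] = Field(
        description="Given a user question, choose vectorstore or web_search."
    )

# Abridged stand-in for the system prompt shown in the diff above.
system = "You are an expert at routing a user question to a vectorstore or web search."

llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # assumed model, not read from the repo
structured_llm_router = llm.with_structured_output(RouteQuery)

route_prompt = ChatPromptTemplate.from_messages(
    [("system", system), ("human", "{question}")]
)
question_router = route_prompt | structured_llm_router

# question_router.invoke({"question": "What does IPC section 378 define?"})
# -> RouteQuery(datasource='vectorstore')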
vectordb/65ba2328-ffa1-497d-b641-c6b84db7f0e1/length.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8e2f111e5a36cc65e03a6865c3705bf32a17689ff8658620547a6164df6dff14
 size 4000