Rohil Bansal committed · Commit ef2f1a5
Parent(s): 96dad0a
Added execution flow.
src/app/main.py  (+149 -96)
CHANGED
@@ -13,99 +13,152 @@ from app.logger import setup_logger
 from data.vector_db import load_vector_db, save_vector_db
 from data.embeddings import get_openai_embeddings
 
+print("Starting src/app/main.py")
+
+try:
+    # Load environment variables and setup logging
+    print("Loading environment variables and setting up logging")
+    openai_api_key = load_env_variables()
+    setup_logger()
+    print("Environment variables loaded and logging set up")
+
+    st.set_page_config(page_title="LawGPT")
+    print("Streamlit page config set")
+
+    col1, col2, col3 = st.columns([1, 4, 1])
+    with col2:
+        try:
+            st.image("assets/Black Bold Initial AI Business Logo.jpg")
+            print("Logo image loaded successfully")
+        except Exception as e:
+            print(f"Error loading logo image: {str(e)}")
+
+    print("Applying custom CSS")
+    st.markdown("""
+        <style>
+        .stApp, .ea3mdgi6{ background-color:#000000; }
+        div.stButton > button:first-child { background-color: #ffd0d0; }
+        div.stButton > button:active { background-color: #ff6262; }
+        div[data-testid="stStatusWidget"] div button { display: none; }
+        .reportview-container { margin-top: -2em; }
+        #MainMenu {visibility: hidden;}
+        .stDeployButton {display:none;}
+        footer {visibility: hidden;}
+        #stDecoration {display:none;}
+        button[title="View fullscreen"]{ visibility: hidden;}
+        button:first-child{ background-color : transparent !important; }
+        </style>
+    """, unsafe_allow_html=True)
+
+    def reset_conversation():
+        print("Resetting conversation")
+        st.session_state.messages = []
+        st.session_state.memory.clear()
+        print("Conversation reset complete")
+
+    print("Initializing session state")
+    if "messages" not in st.session_state:
+        st.session_state["messages"] = []
+    if "memory" not in st.session_state:
+        st.session_state["memory"] = ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True)
+    print("Session state initialized")
+
+    print("Setting up OpenAI embeddings")
+    try:
+        embeddings = get_openai_embeddings(openai_api_key)
+        print("OpenAI embeddings set up successfully")
+    except Exception as e:
+        print(f"Error setting up OpenAI embeddings: {str(e)}")
+        raise
+
+    # Placeholder data for creating the vector database
+    data = [
+        "Example legal text 1",
+        "Example legal text 2",
+        "Example legal text 3",
+        # Add more data as needed
+    ]
+
+    print("Loading vector database")
+    try:
+        db_path = "./ipc_vector_db/vectordb"
+        vector_db = load_vector_db(db_path, embeddings, data)
+        db_retriever = vector_db.as_retriever(search_type="similarity", search_kwargs={"k": 4})
+        print("Vector database loaded successfully")
+    except Exception as e:
+        print(f"Error loading vector database: {str(e)}")
+        raise
+
+    print("Setting up prompt template")
+    prompt_template = """
+    This is a chat template. As a legal chatbot specializing in Indian Penal Code queries, your primary objective is to provide accurate and concise information based on the user's questions. Do not generate your own questions and answers. You will adhere strictly to the instructions provided, offering relevant context from the knowledge base while avoiding unnecessary details. Your responses will be brief, to the point, and in compliance with the established format. If a question falls outside the given context, you will refrain from utilizing the chat history and instead rely on your own knowledge base to generate an appropriate response. You will prioritize the user's query and refrain from posing additional questions. The aim is to deliver professional, precise, and contextually relevant information pertaining to the Indian Penal Code.
+    CONTEXT: {context}
+    CHAT HISTORY: {chat_history}
+    QUESTION: {question}
+    ANSWER:
+    """
+    prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question', 'chat_history'])
+
+    print("Setting up OpenAI LLM")
+    try:
+        llm = OpenAI(model_name="text-davinci-003", temperature=0.5, max_tokens=1024, openai_api_key=os.getenv("OPENAI_API_KEY"))
+        print("OpenAI LLM set up successfully")
+    except Exception as e:
+        print(f"Error setting up OpenAI LLM: {str(e)}")
+        raise
+
+    print("Setting up ConversationalRetrievalChain")
+    try:
+        qa = ConversationalRetrievalChain.from_llm(
+            llm=llm,
+            memory=ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True),
+            retriever=db_retriever,
+            combine_docs_chain_kwargs={'prompt': prompt}
+        )
+        print("ConversationalRetrievalChain set up successfully")
+    except Exception as e:
+        print(f"Error setting up ConversationalRetrievalChain: {str(e)}")
+        raise
+
+    print("Displaying chat messages")
+    for message in st.session_state.get("messages", []):
+        with st.chat_message(message.get("role")):
+            st.write(message.get("content"))
+
+    input_prompt = st.chat_input("Say something")
+
+    if input_prompt:
+        print(f"Received input: {input_prompt}")
+        with st.chat_message("user"):
+            st.write(input_prompt)
+
+        st.session_state.messages.append({"role": "user", "content": input_prompt})
+
+        with st.chat_message("assistant"):
+            with st.spinner("Thinking 💡..."):
+                try:
+                    print("Invoking ConversationalRetrievalChain")
+                    result = qa.invoke(input=input_prompt)
+                    print("ConversationalRetrievalChain invoked successfully")
+
+                    message_placeholder = st.empty()
+                    full_response = "⚠️ **_Note: Information provided may be inaccurate._** \n\n\n"
+                    for chunk in result["answer"]:
+                        full_response += chunk
+                        time.sleep(0.02)
+                        message_placeholder.markdown(full_response + " ▌")
+                    print("Response displayed successfully")
+                except Exception as e:
+                    print(f"Error generating or displaying response: {str(e)}")
+                    st.error("An error occurred while processing your request. Please try again.")
+
+            st.button('Reset All Chat 🗑️', on_click=reset_conversation)
+
+        st.session_state.messages.append({"role": "assistant", "content": result["answer"]})
+
+except Exception as e:
+    print(f"Unhandled exception in main.py: {str(e)}")
+    logging.exception("Unhandled exception in main.py")
+    st.error("An unexpected error occurred. Please try again later.")
+
+print("End of src/app/main.py")
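
The committed main.py relies on helpers that are not part of this diff: load_env_variables, setup_logger (app.logger), get_openai_embeddings (data.embeddings), and load_vector_db / save_vector_db (data.vector_db). The sketch below is not the committed implementation; it is a minimal, assumed version inferred only from the call sites above, using python-dotenv, the standard logging module, and a FAISS-backed LangChain vector store as stand-ins.

# Illustrative sketch only -- the real helper modules are not shown in this
# commit. Everything below is an assumption inferred from how main.py calls
# these functions, not the project's actual code.

import logging
import os

from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS


def load_env_variables() -> str:
    """Assumed: load .env and return the OpenAI API key."""
    load_dotenv()
    return os.getenv("OPENAI_API_KEY", "")


def setup_logger(level: int = logging.INFO) -> None:
    """Assumed: configure root logging for the app."""
    logging.basicConfig(level=level, format="%(asctime)s %(levelname)s %(message)s")


def get_openai_embeddings(openai_api_key: str) -> OpenAIEmbeddings:
    """Assumed: return an OpenAI embeddings client for the given key."""
    return OpenAIEmbeddings(openai_api_key=openai_api_key)


def save_vector_db(vector_db: FAISS, db_path: str) -> None:
    """Assumed: persist the FAISS index to disk."""
    vector_db.save_local(db_path)


def load_vector_db(db_path: str, embeddings: OpenAIEmbeddings, data: list) -> FAISS:
    """Assumed: reload a persisted FAISS index, or build one from `data`."""
    if os.path.isdir(db_path):
        # Newer LangChain releases also require
        # allow_dangerous_deserialization=True here.
        return FAISS.load_local(db_path, embeddings)
    vector_db = FAISS.from_texts(data, embeddings)
    save_vector_db(vector_db, db_path)
    return vector_db

Read this way, load_vector_db("./ipc_vector_db/vectordb", embeddings, data) would build the index from the placeholder data list on the first run and reload the saved index afterwards, which is consistent with main.py passing both a path and fallback data.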