sairamn committed on
Commit
8896d3e
1 Parent(s): 4844e8f

Final work of v4

Files changed (1)
  1. app.py +39 -17
app.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import pickle
-import os
 from langchain_community.vectorstores import FAISS
 from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain.prompts import PromptTemplate
@@ -26,11 +25,9 @@ embeddings = load_embeddings()
 # Load FAISS index and pickle file without explicitly defining paths
 def load_db():
     try:
-        # Load FAISS index directly from the folder "law_vector_db"
         db = FAISS.load_local("law_vector_db", embeddings, allow_dangerous_deserialization=True)
         print(f"FAISS index loaded successfully.")

-        # Load metadata from the pickle file located in the same directory as FAISS index
         with open('law_vector_db/index.pkl', 'rb') as pkl_file:
             metadata = pickle.load(pkl_file)
         print("Pickle file loaded successfully.")
@@ -62,8 +59,8 @@ ANSWER:</s>[INST]

 prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question', 'chat_history'])

-# Directly set Together API key
-TOGETHER_AI_API = "66bd7a6dc11956ddb311b773c0deabda8870e8c90e9f548ce064880ac47c4b05"  # Hardcoded API key
+# Together API Key (hardcoded)
+TOGETHER_AI_API = "66bd7a6dc11956ddb311b773c0deabda8870e8c90e9f548ce064880ac47c4b05"

 # Initialize LLM (Together API)
 llm = Together(
@@ -75,7 +72,7 @@ llm = Together(


 # Function to process user input and generate responses
-def ask_question(user_question, chat_history=""):
+def ask_question(user_question, chat_history=[]):
     try:
         # Retrieve relevant documents from FAISS index
         context_docs = db_retriever.get_relevant_documents(user_question)
@@ -86,26 +83,51 @@ def ask_question(user_question, chat_history=""):
         input_data = {
             "context": context,
             "question": user_question,
-            "chat_history": chat_history
+            "chat_history": "\n".join(chat_history)  # Chat history as string
         }

         # Generate the answer using Together API
         response = llm(prompt.format(**input_data))
         return response
-
     except Exception as e:
         return f"Error: {e}"


-# Set up Gradio interface
-iface = gr.Interface(
-    fn=ask_question,
-    inputs=[gr.Textbox(label="Ask a Question", placeholder="Type your question here..."),
-            gr.Textbox(label="Chat History (Optional)", placeholder="Type chat history here...", lines=2)],
-    outputs="text",
-    title="Legal Chatbot",
-    description="Ask questions about the Indian Penal Code."
-)
+# Function to manage conversation flow
+def chat_bot_interface(user_message, chat_history=[]):
+    if not user_message:
+        return chat_history, chat_history  # No update if message is empty
+
+    # Append user message
+    chat_history.append(("User", user_message))
+
+    # Get system response
+    response = ask_question(user_message, [msg[1] for msg in chat_history if msg[0] == "User"])
+
+    # Append system response
+    chat_history.append(("Assistant", response))
+
+    return chat_history, chat_history
+
+
+# Set up Gradio interface with a professional chatbot UI
+with gr.Blocks() as iface:
+    gr.Markdown("<h1 style='text-align: center;'>Legal Chatbot</h1>")
+
+    chatbot = gr.Chatbot(label="Chatbot Interface")
+    user_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...", lines=1)
+    clear_button = gr.Button("Clear")
+
+    # Maintain chat history
+    chat_history = gr.State([])
+
+
+    def clear_chat():
+        return [], []
+
+
+    user_input.submit(chat_bot_interface, inputs=[user_input, chat_history], outputs=[chatbot, chat_history])
+    clear_button.click(clear_chat, outputs=[chatbot, chat_history])

 # Launch the Gradio app
 if __name__ == "__main__":
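
A note on `load_db` as committed: in `langchain_community`, `FAISS.load_local("law_vector_db", ...)` already deserializes both `law_vector_db/index.faiss` and `law_vector_db/index.pkl` (the docstore and index-to-id mapping), so the explicit `pickle.load` of `index.pkl` re-reads data the loader has just consumed. A trimmed sketch, assuming `embeddings` is the `HuggingFaceEmbeddings` instance created earlier in `app.py`:

    from langchain_community.vectorstores import FAISS

    def load_db():
        # load_local reads index.faiss and index.pkl from the folder itself,
        # so no separate pickle handling is needed.
        db = FAISS.load_local("law_vector_db", embeddings, allow_dangerous_deserialization=True)
        print("FAISS index loaded successfully.")
        return db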
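
The diff also drops `import os` while leaving the Together API key hardcoded in the source, which exposes the key in a public repository. A minimal sketch of the usual alternative, assuming the key is supplied through an environment variable (the name `TOGETHER_AI_API` is chosen here for illustration, e.g. as a Hugging Face Spaces secret):

    import os

    # Sketch only: read the key from the environment rather than committing it.
    # "TOGETHER_AI_API" is an assumed secret name; any name works as long as
    # the deployment environment sets it.
    TOGETHER_AI_API = os.environ.get("TOGETHER_AI_API")
    if not TOGETHER_AI_API:
        raise RuntimeError("TOGETHER_AI_API is not set in the environment")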
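
One caveat on the new conversation flow: in the tuple format `gr.Chatbot` used at the time of this commit, each history entry renders as a `(user_message, assistant_message)` pair, so appending role-labeled tuples such as `("User", user_message)` shows the literal string "User" as the user-side bubble and the question as the reply. A hypothetical reworking of `chat_bot_interface` that matches the expected pairing:

    def chat_bot_interface(user_message, chat_history):
        if not user_message:
            return chat_history, chat_history  # No update if message is empty

        # Each gr.Chatbot entry is one (user_message, assistant_message) pair;
        # past user turns are the first element of each pair.
        response = ask_question(user_message, [user for user, _ in chat_history])
        chat_history.append((user_message, response))
        return chat_history, chat_history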
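
Finally, `chat_history=[]` as a default argument (in both `ask_question` and `chat_bot_interface`) is the classic mutable-default pitfall: Python evaluates the default once at definition time and shares the same list across calls. The Gradio wiring always passes the `gr.State` list explicitly, so it is unlikely to bite here, but the conventional idiom is:

    def ask_question(user_question, chat_history=None):
        # A fresh list per call; a [] default is created once at definition
        # time and mutated by every call that omits the argument.
        if chat_history is None:
            chat_history = []
        ...  # body unchanged from the commit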