# NOTE(review): the lines below are residue from the web page this file was
# scraped from (HF Space header, file size, git hashes, line-number gutter),
# not Python code. Commented out so the module parses; safe to delete.
# Spaces: Sleeping / Sleeping
# File size: 6,092 Bytes
# 6dd04d7 f1e86f0 6dd04d7 f1e86f0 6dd04d7 503b734 6dd04d7 e87396d 6dd04d7 503b734 2ff0980 503b734 6dd04d7 503b734 6dd04d7 503b734
# (line-number gutter 1..130 removed)
import time
import os
import streamlit as st
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
from langchain_together import Together
from footer import footer
# Configure the Streamlit page, then render the banner centered by placing
# it in a wide middle column flanked by two narrow spacer columns.
st.set_page_config(page_title="In-Legal-IPC", layout="centered")

col1, col2, col3 = st.columns([1, 30, 1])
with col2:
    st.image(
        "https://raw.githubusercontent.com/shiv4321/Images/refs/heads/main/Banner.png",
        use_container_width=True,
    )
def hide_hamburger_menu():
    """Inject CSS that hides Streamlit's hamburger menu and default footer."""
    st.markdown("""
    <style>
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    </style>
    """, unsafe_allow_html=True)


hide_hamburger_menu()
# --- Session state -------------------------------------------------------
# `messages` holds the visible transcript; `memory` is a rolling window of
# the last 5 exchanges fed back to the LLM on each turn.
if "messages" not in st.session_state:
    st.session_state.messages = []
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferWindowMemory(
        k=5, memory_key="chat_history", return_messages=True
    )


@st.cache_resource
def load_embeddings():
    """Load and cache the InLegalBERT embeddings model (cached per process)."""
    return HuggingFaceEmbeddings(model_name="law-ai/InLegalBERT")


embeddings = load_embeddings()

# Load the prebuilt FAISS index of IPC material and expose it as a
# top-3 similarity retriever.
db = FAISS.load_local("ipc_embed_db", embeddings, allow_dangerous_deserialization=True)
db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 3})
prompt_template = """
<s>[INST]
As a legal chatbot specializing in the Indian Penal Code, you are tasked with providing highly accurate and contextually appropriate responses. Ensure your answers meet these criteria:
- Respond in a bullet-point format to clearly delineate distinct aspects of the legal query.
- Each point should accurately reflect the breadth of the legal provision in question, avoiding over-specificity unless directly relevant to the user's query.
- Clarify the general applicability of the legal rules or sections mentioned, highlighting any common misconceptions or frequently misunderstood aspects.
- Limit responses to essential information that directly addresses the user's question, providing concise yet comprehensive explanations.
- Avoid assuming specific contexts or details not provided in the query, focusing on delivering universally applicable legal interpretations unless otherwise specified.
- Conclude with a brief summary that captures the essence of the legal discussion and corrects any common misinterpretations related to the topic.
CONTEXT: {context}
CHAT HISTORY: {chat_history}
QUESTION: {question}
ANSWER:
- [Detail the first key aspect of the law, ensuring it reflects general application]
- [Provide a concise explanation of how the law is typically interpreted or applied]
- [Correct a common misconception or clarify a frequently misunderstood aspect]
- [Detail any exceptions to the general rule, if applicable]
- [Include any additional relevant information that directly relates to the user's query]
</s>[INST]
"""
prompt = PromptTemplate(template=prompt_template,
input_variables=['context', 'question', 'chat_history'])
# Read the Together API key from the environment.  The original hard-coded a
# live secret in source (and left this env lookup unused) — never commit
# credentials; rotate the leaked key.
api_key = os.getenv('TOGETHER_API_KEY')
if not api_key:
    st.error("TOGETHER_API_KEY environment variable is not set.")
    st.stop()

llm = Together(
    model="mistralai/Mixtral-8x22B-Instruct-v0.1",
    temperature=0.5,
    max_tokens=1024,
    together_api_key=api_key,
)

# Retrieval-augmented conversational chain: FAISS top-3 retriever + windowed
# chat memory + the IPC prompt defined above.
qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    memory=st.session_state.memory,
    retriever=db_retriever,
    combine_docs_chain_kwargs={'prompt': prompt},
)
def extract_answer(full_response):
    """Return the text after the first "Response:" marker, stripped.

    If the marker is absent, the response is returned unchanged.  Used to
    drop any instructional preamble the LLM echoes back.
    """
    _, marker, tail = full_response.partition("Response:")
    return tail.strip() if marker else full_response
def reset_conversation():
    """Clear both the visible transcript and the LLM's conversation memory."""
    st.session_state.messages = []
    st.session_state.memory.clear()
# Downloadable legal forms, shown just above the chat input.
# The original source contained mojibake ("π") where a document emoji was
# presumably intended — restored to 📄 (confirm against the deployed app).
st.markdown("""
### Useful PDFs
- [📄 Commercial Court Rules and Forms](https://drive.google.com/file/d/1puzlPMT7fTt4utWJaGlFtOjW38CoFctc/view?usp=drive_link)
- [📄 Bail-Bond](https://drive.google.com/file/d/1uRlT7Yo_2jemxs5aRyvHzoLgeS7S81Vn/view?usp=drive_link)
- [📄 Inspection Form](https://drive.google.com/file/d/1Ib-RC4xPMqZVl7YgVES3Vb47Rb42W83s/view?usp=drive_link)
- [📄 Additional PDF](https://drive.google.com/file/d/1Xkq64r4Id8qSyb5woVzdvArhzUxvKJk8/view?usp=drive_link)
""", unsafe_allow_html=True)
# Replay the stored transcript so the chat history survives Streamlit reruns.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
# Chat input area
input_prompt = st.chat_input("Say something...")
if input_prompt:
with st.chat_message("user"):
st.markdown(f"**You:** {input_prompt}")
st.session_state.messages.append({"role": "user", "content": input_prompt})
with st.chat_message("assistant"):
with st.spinner("Thinking π‘..."):
result = qa.invoke(input=input_prompt)
message_placeholder = st.empty()
answer = extract_answer(result["answer"])
# Initialize the response message
full_response = "β οΈ **_Gentle reminder: We generally ensure precise information, but do double-check._** \n\n\n"
for chunk in answer:
# Simulate typing by appending chunks of the response over time
full_response += chunk
time.sleep(0.02) # Adjust the sleep time to control the "typing" speed
message_placeholder.markdown(full_response + " |", unsafe_allow_html=True)
st.session_state.messages.append({"role": "assistant", "content": answer})
# Reset button: on_click clears transcript + memory, then force a rerun so
# the cleared state renders immediately.  st.experimental_rerun() was
# removed in Streamlit 1.27+; st.rerun() is the supported replacement.
# (Mojibake "ποΈ" restored to 🗑️.)
if st.button('🗑️ Reset All Chat', on_click=reset_conversation):
    st.rerun()
# Render the custom footer (CSS/markup live in footer.py).  The trailing
# " |" after this call in the scraped source was a page artifact and a
# syntax error; removed.
footer()