Shiv22419 committed on
Commit
d2a20dd
·
verified ·
1 Parent(s): 289b181

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -152
app.py CHANGED
@@ -1,152 +0,0 @@
1
# NOTE(review): installing dependencies at app start-up is fragile — a
# requirements.txt is the right home for these. Kept for compatibility, but
# fixed to invoke pip through the *current* interpreter (`sys.executable -m
# pip`), so the packages are guaranteed to land in the environment this
# script actually runs in (a bare "pip" may belong to a different Python).
import subprocess
import sys

subprocess.check_call([
    sys.executable,
    "-m",
    "pip",
    "install",
    "langchain",
    "langchain-community",
    "streamlit",
    "huggingface_hub",
    "faiss-cpu",
    "langchain-together",
    "transformers",
])
14
-
15
- import time
16
- import os
17
- import streamlit as st
18
- from langchain.vectorstores import FAISS
19
- from langchain.embeddings import HuggingFaceEmbeddings
20
- from langchain.prompts import PromptTemplate
21
- from langchain.memory import ConversationBufferWindowMemory
22
- from langchain.chains import ConversationalRetrievalChain
23
- from langchain_together import Together
24
- from transformers import AutoTokenizer, AutoModel
25
-
26
# Footer code
def footer():
    """Render a small centered footer linking to the project's GitHub repo."""
    footer_html = """
    <div style="text-align: center; font-size: 12px; color: #999;">
        <p>
            Developed by <a href="https://github.com/Nike-one/BharatLAW" target="_blank">BharatLAW</a>
        </p>
    </div>
    """
    st.markdown(footer_html, unsafe_allow_html=True)
39
-
40
# Configure the Streamlit page, then draw the banner image centered in a
# wide middle column (side columns act as margins and are intentionally unused).
st.set_page_config(page_title="BharatLAW", layout="centered")

_, banner_col, _ = st.columns([1, 30, 1])
with banner_col:
    st.image(
        "https://github.com/Nike-one/BharatLAW/blob/master/images/banner.png?raw=true",
        use_column_width=True,
    )
47
-
48
def hide_hamburger_menu():
    """Hide Streamlit's default hamburger menu and footer via injected CSS."""
    css = """
        <style>
        #MainMenu {visibility: hidden;}
        footer {visibility: hidden;}
        </style>
        """
    st.markdown(css, unsafe_allow_html=True)


hide_hamburger_menu()
57
-
58
# Initialize session state for messages and memory.
# "messages" holds the visible transcript: a list of {"role", "content"}
# dicts that is re-rendered on every Streamlit rerun.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Windowed memory keeps only the last k=2 exchanges, bounding the chat
# history injected into the retrieval chain's prompt.
if "memory" not in st.session_state:
    st.session_state.memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history", return_messages=True)
64
-
65
@st.cache_resource
def load_embeddings():
    """Load and cache the embeddings model.

    Bug fix: ``HuggingFaceEmbeddings`` does not accept ``model``/``tokenizer``
    objects — it takes a ``model_name`` string and loads the checkpoint
    itself, so the previous ``AutoTokenizer``/``AutoModel`` objects were
    rejected (unexpected keyword arguments). The previous id
    ``nlpaueb/legal-bert`` is also not the published Hub repo; the released
    checkpoint is ``nlpaueb/legal-bert-base-uncased``.
    """
    return HuggingFaceEmbeddings(model_name="nlpaueb/legal-bert-base-uncased")
71
-
72
# Build the retriever: load the pre-computed FAISS index of IPC embeddings
# from local disk and expose top-3 similarity search to the chain below.
embeddings = load_embeddings()
# NOTE(review): allow_dangerous_deserialization unpickles the saved index —
# acceptable only because "ipc_embed_db" is a trusted local artifact.
db = FAISS.load_local("ipc_embed_db", embeddings, allow_dangerous_deserialization=True)
db_retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": 3})
75
-
76
-
77
# System prompt for the retrieval chain. The {context}, {chat_history} and
# {question} placeholders are filled in by ConversationalRetrievalChain at
# query time; the bracketed bullets sketch the expected answer shape.
prompt_template = """
<s>[INST]
As a legal chatbot specializing in the Indian Penal Code, you are tasked with providing highly accurate and contextually appropriate responses. Ensure your answers meet these criteria:
- Respond in a bullet-point format to clearly delineate distinct aspects of the legal query.
- Each point should accurately reflect the breadth of the legal provision in question, avoiding over-specificity unless directly relevant to the user's query.
- Clarify the general applicability of the legal rules or sections mentioned, highlighting any common misconceptions or frequently misunderstood aspects.
- Limit responses to essential information that directly addresses the user's question, providing concise yet comprehensive explanations.
- Avoid assuming specific contexts or details not provided in the query, focusing on delivering universally applicable legal interpretations unless otherwise specified.
- Conclude with a brief summary that captures the essence of the legal discussion and corrects any common misinterpretations related to the topic.
CONTEXT: {context}
CHAT HISTORY: {chat_history}
QUESTION: {question}
ANSWER:
- [Detail the first key aspect of the law, ensuring it reflects general application]
- [Provide a concise explanation of how the law is typically interpreted or applied]
- [Correct a common misconception or clarify a frequently misunderstood aspect]
- [Detail any exceptions to the general rule, if applicable]
- [Include any additional relevant information that directly relates to the user's query]
</s>[INST]
"""
100
-
101
-
102
-
103
# Wrap the template so the chain can inject retrieved context, chat history
# and the user's question.
prompt = PromptTemplate(
    input_variables=["context", "question", "chat_history"],
    template=prompt_template,
)
105
-
106
# SECURITY FIX: the previous version read TOGETHER_API_KEY from the
# environment but then ignored it and hard-coded a live API key in source.
# That key must be treated as leaked and revoked. The LLM now uses the
# environment-supplied key.
api_key = os.getenv('TOGETHER_API_KEY')
llm = Together(
    model="mistralai/Mixtral-8x22B-Instruct-v0.1",
    temperature=0.5,
    max_tokens=1024,
    together_api_key=api_key,
)
108
-
109
# Conversational RAG chain: retrieve top-3 IPC passages, combine them with
# the windowed chat memory, and answer via the prompt defined above.
qa = ConversationalRetrievalChain.from_llm(llm=llm, memory=st.session_state.memory, retriever=db_retriever, combine_docs_chain_kwargs={'prompt': prompt})
110
-
111
def extract_answer(full_response):
    """Return the text following the first "Response:" marker, stripped.

    If the marker is absent, the response is returned unchanged.
    """
    _, marker, tail = full_response.partition("Response:")
    if marker:
        return tail.strip()
    return full_response
119
-
120
def reset_conversation():
    """Clear both the visible transcript and the chain's conversation memory."""
    st.session_state.messages = []
    st.session_state.memory.clear()
123
-
124
# Re-render the whole conversation on each Streamlit rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
127
-
128
-
129
# Main chat turn: echo the user message, run the RAG chain, then "type out"
# the answer into a placeholder.
input_prompt = st.chat_input("Say something...")
if input_prompt:
    with st.chat_message("user"):
        st.markdown(f"**You:** {input_prompt}")

    st.session_state.messages.append({"role": "user", "content": input_prompt})
    with st.chat_message("assistant"):
        with st.spinner("Thinking 💡..."):
            result = qa.invoke(input=input_prompt)
            message_placeholder = st.empty()
            answer = extract_answer(result["answer"])

            # Initialize the response message with a standing disclaimer.
            full_response = "⚠️ **_Gentle reminder: We generally ensure precise information, but do double-check._** \n\n\n"
            for chunk in answer:
                # Simulate typing by appending one character at a time.
                full_response += chunk
                time.sleep(0.02)  # Adjust the sleep time to control the "typing" speed
                message_placeholder.markdown(full_response + " |", unsafe_allow_html=True)
            # BUG FIX: re-render once more WITHOUT the " |" typing cursor —
            # previously the final on-screen message kept a stray trailing
            # cursor character forever.
            message_placeholder.markdown(full_response, unsafe_allow_html=True)

    # Only the bare answer (without the disclaimer) is stored in history,
    # matching the original behavior.
    st.session_state.messages.append({"role": "assistant", "content": answer})
150
-
151
# Reset control: on_click clears transcript + memory before the rerun fires.
if st.button('🗑️ Reset All Chat', on_click=reset_conversation):
    # NOTE(review): st.experimental_rerun is deprecated in newer Streamlit
    # releases (st.rerun is the successor) — confirm the pinned version.
    st.experimental_rerun()