Update app.py
app.py
CHANGED
@@ -1,63 +1,177 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
 
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
 
-    messages.append({"role": "user", "content": message})
 
-    response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
 
-        response += token
-        yield response
 
 """
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=1.0, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+import os
+from groq import Groq
 import gradio as gr
+import fitz  # PyMuPDF for PDF text extraction
+from PIL import Image
+import pytesseract
+from fpdf import FPDF  # Library for creating PDFs
 
+# Set your API key
 
+# Initialize the Groq client
+client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
 
+# Initialize the conversation history
+conversation_history = []
 
+def extract_text_from_file(uploaded_file):
+    text = ""
+    try:
+        if uploaded_file.name.endswith(".pdf"):
+            doc = fitz.open(stream=uploaded_file.read(), filetype="pdf")
+            for page in doc:
+                text += page.get_text()
+        elif uploaded_file.name.endswith((".png", ".jpg", ".jpeg")):
+            image = Image.open(uploaded_file)
+            text = pytesseract.image_to_string(image)
+        else:
+            text = "Unsupported file format. Please upload a PDF or image file."
+    except Exception as e:
+        text = f"Error extracting text: {str(e)}"
+    return text
 
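A note on extract_text_from_file: depending on the Gradio version, gr.File passes the callback a filepath string rather than an open file object, in which case uploaded_file.read() and uploaded_file.name may not behave as written. A minimal path-based sketch of the same logic, assuming a filepath input and reusing the file's imports (extract_text_from_path is a hypothetical name, not part of this commit):

def extract_text_from_path(file_path: str) -> str:
    # Same dispatch as above, but everything is opened from a path.
    text = ""
    try:
        if file_path.endswith(".pdf"):
            doc = fitz.open(file_path)  # PyMuPDF opens directly from a path
            for page in doc:
                text += page.get_text()
        elif file_path.endswith((".png", ".jpg", ".jpeg")):
            text = pytesseract.image_to_string(Image.open(file_path))
        else:
            text = "Unsupported file format. Please upload a PDF or image file."
    except Exception as e:
        text = f"Error extracting text: {e}"
    return text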
+def legal_chatbot(user_query, uploaded_file):
+    role_context = (
+        "As a seasoned legal expert with over 30 years of experience specializing in Pakistani law and justice, "
+        "your role is to provide precise, actionable, and lawful advice. "
+        "You have an in-depth understanding of the Pakistan Penal Code, along with other significant legal frameworks, "
+        "and you are well-versed in recent amendments, landmark rulings, "
+        "and their implications. You are expected to respond with clear, comprehensive, and contextually accurate legal guidance. "
+        "Your response should focus on assisting legal professionals, such as lawyers and judges, in understanding and applying the relevant sections, rules, and case laws. "
+        "Ensure that your advice is practical, within the boundaries of law and ethics, and addresses the query with the aim of promoting justice and aiding in the resolution of legal matters. "
+        "If the query is unclear or lacks sufficient detail, provide general guidance or ask for additional information to deliver a more tailored response. "
+        "Your ultimate goal is to support the legal process and help people achieve justice."
+    )
 
+    if uploaded_file:
+        file_text = extract_text_from_file(uploaded_file)
+        user_query = f"{user_query}\n\nRefer to the following case file content:\n{file_text}"
 
+    llm_input = f"{role_context} {user_query}"
+    conversation_history.append({"role": "user", "content": user_query})
 
+    try:
+        chat_completion = client.chat.completions.create(
+            messages=[{"role": "user", "content": llm_input}],
+            model="llama3-8b-8192",
+        )
+        response = chat_completion.choices[0].message.content
+        conversation_history.append({"role": "assistant", "content": response})
+    except Exception as e:
+        response = f"An error occurred while generating the response: {str(e)}"
+        print(response)  # Log the error for debugging
 
+    return response
+
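Note that the completion request sends only the current llm_input; the turns collected in conversation_history feed the on-screen transcript and the PDF export, not the model's context, so each query is answered statelessly. A hypothetical multi-turn variant (not in this commit) would pass the stored history with the role prompt as a system message:

chat_completion = client.chat.completions.create(
    # conversation_history already ends with the current user turn here
    messages=[{"role": "system", "content": role_context}] + conversation_history,
    model="llama3-8b-8192",
)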
+def save_conversation():
+    pdf = FPDF()
+    pdf.set_auto_page_break(auto=True, margin=15)
+    pdf.add_page()
+    pdf.set_font("Arial", size=12)
+
+    for message in conversation_history:
+        role = "You" if message["role"] == "user" else "Legal AI Assistant"
+        pdf.multi_cell(0, 10, f"{role}: {message['content']}\n\n")
+
+    pdf_file = "conversation.pdf"
+    pdf.output(pdf_file)
+
+    return pdf_file
+
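One caveat on save_conversation: FPDF's built-in Arial font covers only Latin-1, so a response containing characters outside that range can make the PDF write raise an encoding error. A defensive sketch (an assumed failure mode, not observed in this commit) that substitutes unsupported characters before writing:

# inside the loop, in place of the multi_cell call above
line = f"{role}: {message['content']}\n\n"
pdf.multi_cell(0, 10, line.encode("latin-1", "replace").decode("latin-1"))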
+def clear_chat():
+    global conversation_history
+    conversation_history = []
+    return ""
+
+css = """
+#generate-button {
+    background-color: #388E3C; /* Darker green */
+    color: black;
+    border-radius: 5px;
+    padding: 2px 8px;
+    font-size: 16px; /* Slightly larger font size */
+    margin-top: 5px;
+    height: 35px;
+}
+
+#clear-button {
+    background-color: #D32F2F; /* Darker red */
+    color: red;
+    border-radius: 5px;
+    padding: 2px 8px;
+    font-size: 16px;
+    margin-top: 5px;
+    height: 35px;
+}
+
+#save-button {
+    background-color: #1976D2; /* Darker blue */
+    color: green;
+    border-radius: 5px;
+    padding: 2px 8px;
+    font-size: 16px;
+    margin-top: 5px;
+    height: 35px;
+}
+
+#chat-output {
+    overflow-y: auto;
+    height: 400px;
+    width: 100%;
+}
 """
+
+def gradio_interface():
+    with gr.Blocks(css=css) as demo:
+        with gr.Row():
+            gr.Markdown(f"<h2 style='text-align: center;'>⚖️ Legal AI Assistant Chatbot - Engr. Hamesh Raj</h2>")
+        with gr.Row():
+            gr.Markdown(f"<h4 style='text-align: center;'>Ask any legal questions related to Pakistani law. [Connect on LinkedIn](https://www.linkedin.com/in/hameshraj)</h4>")
+
+        with gr.Row():
+            with gr.Column(scale=0.5, min_width=30):
+                uploaded_file = gr.File(label="Upload a case file (PDF or Image)", elem_id="upload-box")
+                start_new_button = gr.Button("Start New", elem_id="clear-button", scale=0.25)
+                save_button = gr.Button("Save Conversation", elem_id="save-button", scale=0.25)
+
+            with gr.Column(scale=2):
+                user_query = gr.Textbox(label="Enter your legal query here", placeholder="Ask about any law, section, or legal issue...", lines=3, elem_id="query-box")
+                generate_button = gr.Button("Get Legal Advice", elem_id="generate-button")
+                download_link = gr.File(label="Download PDF", elem_id="download-link", visible=False)
+
+        with gr.Row():
+            chat_output = gr.Markdown(elem_id="chat-output", wrap_lines=True)
+
+        def update_chat(user_query, uploaded_file):
+            response = legal_chatbot(user_query, uploaded_file)
+            chat_history = ""
+            for message in conversation_history:
+                role = "You" if message["role"] == "user" else "Legal AI Assistant"
+                chat_history += f"**{role}:** {message['content']}\n\n---\n\n"
+            return chat_history
+
+        def save_and_update():
+            pdf_file = save_conversation()
+            return gr.update(visible=True), pdf_file
+
+        generate_button.click(
+            fn=update_chat,
+            inputs=[user_query, uploaded_file],
+            outputs=[chat_output]
+        )
+
+        start_new_button.click(
+            fn=clear_chat,
+            inputs=None,
+            outputs=[chat_output]
+        )
+
+        save_button.click(
+            fn=save_and_update,
+            inputs=None,
+            outputs=[download_link, download_link]
+        )
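On the save wiring: save_and_update returns two values and outputs names download_link twice, so the component first receives the visibility update and then the file path. A more compact hypothetical wiring (not in this commit) returns a single update that sets both at once:

def save_and_update():
    # One gr.update carries both the file value and the visibility flag.
    return gr.update(value=save_conversation(), visible=True)

save_button.click(fn=save_and_update, inputs=None, outputs=[download_link])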
+
+    demo.launch(share=True)
+
+gradio_interface()
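To run this revision, the imports imply dependencies along these lines (a sketch; the PyPI names are the usual mappings for these modules, and version pins are omitted):

# requirements.txt
groq
gradio
PyMuPDF      # imported as fitz
Pillow       # imported as PIL
pytesseract
fpdf

# packages.txt (Hugging Face Spaces): pytesseract needs the Tesseract binary
tesseract-ocr

The Groq client reads its key from the GROQ_API_KEY environment variable, so on a Space it belongs in the repository secrets rather than in app.py.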