Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,28 +1,25 @@
|
|
1 |
import gradio as gr
|
2 |
-
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
3 |
import os
|
4 |
-
from langchain import PromptTemplate
|
5 |
-
from langchain import LLMChain
|
6 |
from langchain_together import Together
|
7 |
-
import re
|
8 |
import pdfplumber
|
9 |
-
# Set the API key with double quotes
|
10 |
|
|
|
11 |
os.environ['TOGETHER_API_KEY'] = "c2f52626b97118b71c0c36f66eda4f5957c8fc475e760c3d72f98ba07d3ed3b5"
|
12 |
|
13 |
-
|
14 |
-
|
15 |
-
with pdfplumber.open(
|
16 |
for i, page in enumerate(pdf.pages):
|
17 |
if i >= max_pages:
|
18 |
break
|
19 |
text += page.extract_text() + "\n"
|
|
|
20 |
|
21 |
-
def Bot(
|
22 |
chat_template = """
|
23 |
Based on the provided context: {text}
|
24 |
Please answer the following question: {Questions}
|
25 |
-
|
26 |
Only provide answers that are directly related to the context. If the question is unrelated, respond with "I don't know".
|
27 |
"""
|
28 |
prompt = PromptTemplate(
|
@@ -35,7 +32,7 @@ def Bot(Questions):
|
|
35 |
try:
|
36 |
response = Generated_chat.invoke({
|
37 |
"text": text,
|
38 |
-
"Questions":
|
39 |
})
|
40 |
|
41 |
response_text = response['text']
|
@@ -55,43 +52,33 @@ def Bot(Questions):
|
|
55 |
except Exception as e:
|
56 |
return f"Error in generating response: {e}"
|
57 |
|
58 |
-
def ChatBot(
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
|
80 |
-
# if similarity.item() > threshold:
|
81 |
-
# response=Bot(Questions)
|
82 |
-
# return response
|
83 |
-
# else:
|
84 |
-
# response="The statement is not related to the text."
|
85 |
-
# return response
|
86 |
-
|
87 |
-
iface = gr.Interface(fn=ChatBot, inputs="text", outputs="text", title="Chatbot")
|
88 |
iface.launch(debug=True)
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
|
|
1 |
import gradio as gr
|
|
|
2 |
import os
|
3 |
+
from langchain import PromptTemplate, LLMChain
|
|
|
4 |
from langchain_together import Together
|
|
|
5 |
import pdfplumber
|
|
|
6 |
|
7 |
+
# Set the API key
# SECURITY NOTE(review): a live API credential is hard-coded and committed to
# source — it should be rotated and loaded from the environment / Space secrets
# (e.g. left to the deployment to set) instead. Flagged only; doc-only edit.
os.environ['TOGETHER_API_KEY'] = "c2f52626b97118b71c0c36f66eda4f5957c8fc475e760c3d72f98ba07d3ed3b5"
|
9 |
|
10 |
+
def extract_text_from_pdf(pdf_file, max_pages=16):
    """Extract plain text from the first ``max_pages`` pages of a PDF.

    Parameters
    ----------
    pdf_file : str or file-like
        Path or open binary file accepted by ``pdfplumber.open``.
    max_pages : int, optional
        Maximum number of pages to read (default 16).

    Returns
    -------
    str
        Concatenated page texts, each followed by a newline.
    """
    pages_text = []
    with pdfplumber.open(pdf_file) as pdf:
        for page_index, page in enumerate(pdf.pages):
            if page_index >= max_pages:
                break
            # BUG FIX: extract_text() returns None for pages with no text
            # layer (e.g. scanned images); the original `None + "\n"` raised
            # TypeError. Treat such pages as empty.
            pages_text.append((page.extract_text() or "") + "\n")
    # join() instead of repeated += avoids quadratic string building.
    return "".join(pages_text)
|
18 |
|
19 |
+
def Bot(text, question):
|
20 |
chat_template = """
|
21 |
Based on the provided context: {text}
|
22 |
Please answer the following question: {Questions}
|
|
|
23 |
Only provide answers that are directly related to the context. If the question is unrelated, respond with "I don't know".
|
24 |
"""
|
25 |
prompt = PromptTemplate(
|
|
|
32 |
try:
|
33 |
response = Generated_chat.invoke({
|
34 |
"text": text,
|
35 |
+
"Questions": question
|
36 |
})
|
37 |
|
38 |
response_text = response['text']
|
|
|
52 |
except Exception as e:
|
53 |
return f"Error in generating response: {e}"
|
54 |
|
55 |
+
def ChatBot(history, document, question):
    """Answer ``question`` about the uploaded PDF and extend the transcript.

    Parameters
    ----------
    history : list[tuple[str, str]]
        Chat transcript as (speaker, message) pairs. Mutated in place on the
        document-question path; a new list is returned on the greeting path.
    document : str
        Filepath of the uploaded PDF (``gr.File(type="filepath")`` output).
        Only read when the question is not a greeting.
    question : str
        The user's message.

    Returns
    -------
    list[tuple[str, str]]
        The updated transcript including this exchange.
    """
    greetings = ["hi", "hello", "hey", "greetings", "what's up", "howdy"]
    question_lower = question.lower().strip()

    # BUG FIX: the original used startswith(), so any question beginning with
    # a greeting's letters (e.g. "history of rome" starts with "hi") was
    # misclassified as a greeting. Match the whole message or its first
    # whitespace-delimited word instead.
    first_word = question_lower.split()[0] if question_lower else ""
    if question_lower in greetings or first_word in greetings:
        return history + [("User", question), ("Bot", "Hello! How can I assist you with the document today?")]

    # Extract text from the uploaded PDF document.
    text = extract_text_from_pdf(document)

    # Generate the bot response based on the question and extracted text.
    response = Bot(text, question)

    # Record the exchange in the transcript.
    history.append(("User", question))
    history.append(("Bot", response))

    return history
|
72 |
+
|
73 |
+
# Assemble the Gradio Blocks UI: a transcript pane, a PDF upload widget, and
# a question box whose Enter-key submit event drives the ChatBot pipeline.
with gr.Blocks() as iface:
    chat_view = gr.Chatbot()
    pdf_input = gr.File(label="Upload PDF Document", type="filepath")
    question_box = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")

    def respond(history, document, question):
        """Thin adapter forwarding the submit event to ChatBot unchanged."""
        return ChatBot(history, document, question)

    # On submit, feed (transcript, file path, question) to the handler and
    # write its return value back into the transcript pane.
    question_box.submit(respond, [chat_view, pdf_input, question_box], chat_view)

iface.launch(debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|