# LynxDemo / app.py
import os
import re
import io
from typing import List, Tuple, Union
from pathlib import Path
import gradio as gr
import openai
import pymupdf
from docx import Document
HF_TOKEN = os.environ.get("HF_TOKEN", None)
LEPTON_API_TOKEN = os.environ.get("LEPTON_API_TOKEN", None)
# client=openai.OpenAI(
# base_url="https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/",
# api_key=LEPTON_API_TOKEN
# )
# client=openai.OpenAI(
# base_url="https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/",
# api_key=LEPTON_API_TOKEN
# )
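# (The hard-coded clients above are superseded by the per-request client
# constructed in model_call, using the base URL selected in the UI.)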
PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.
--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}
--
DOCUMENT:
{document}
--
ANSWER:
{answer}
--
Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""
css = """
@import url('https://fonts.googleapis.com/css2?family=Plus+Jakarta+Sans:wght@400;700&display=swap');
body, .gradio-container {
font-family: 'Plus Jakarta Sans', sans-serif !important;
}
"""
HEADER = """
# Patronus Lynx Demo
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:50px;">
<td style="text-align: center;">
<a href="https://www.patronus.ai">
<img src="https://cdn.prod.website-files.com/64e655d42d3be60f582d0472/64ede352897bcddbe2d41207_patronusai_final_logo.svg" width="200" height="40" />
</a>
</td>
</tr>
</table>
<table bgcolor="#1E2432" cellspacing="0" cellpadding="0" width="450">
<tr style="height:30px;">
<td style="text-align: center;">
<a href="https://huggingface.co/PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"><img src="https://img.shields.io/badge/%F0%9F%A4%97%20Model_Card-Huggingface-orange" height="20"></a>
</td>
<td style="text-align: center;">
<a href="https://github.com/patronus-ai/Lynx-hallucination-detection"><img src="https://postimage.me/images/2024/03/04/GitHub_Logo_White.png" width="100" height="20"></a>
</td>
<td style="text-align: center; color: white;">
<a href="https://arxiv.org/abs/2407.08488"><img src="https://img.shields.io/badge/arXiv-2407.08488-b31b1b.svg" height="20"></a>
</td>
</tr>
</table>
**Patronus Lynx** is a state-of-the-art open-source model for hallucination detection.
**Getting Started**: Provide the question and the document or context that were given to your model, along with the model's answer, then click Submit. The output panel shows a score of PASS if the response is faithful to the given document or context, or FAIL if it is a hallucination, together with the reasoning behind the score.
"""
# Note: python-docx cannot read legacy binary .doc files, so only .docx is accepted.
UPLOADABLE_FILE_TYPES = [".pdf", ".txt", ".docx"]
def update_client_base_url(model_name):
    if model_name == "Patronus Lynx 8B v1.1":
        return "https://yb15a7dy-patronus-lynx-8b-v1-1.tin.lepton.run/api/v1/"
    elif model_name == "Patronus Lynx 70B":
        return "https://yb15a7dy-lynx-70b.tin.lepton.run/api/v1/"
def parse_patronus_lynx_response(
    response: str,
) -> Tuple[bool, Union[List[str], None]]:
    """
    Parses the response from the Patronus Lynx LLM and returns a tuple of:
    - Whether the response is hallucinated or not.
    - A reasoning trace explaining the decision.
    """
    # Default to hallucinated
    hallucination, reasoning = True, None
    reasoning_pattern = r'"REASONING":\s*\[(.*?)\]'
    score_pattern = r'"SCORE":\s*"?\b(PASS|FAIL)\b"?'
    reasoning_match = re.search(reasoning_pattern, response, re.DOTALL)
    score_match = re.search(score_pattern, response)
    if score_match:
        score = score_match.group(1)
        if score == "PASS":
            hallucination = False
    if reasoning_match:
        reasoning_content = reasoning_match.group(1)
        reasoning = re.split(r"['\"],\s*['\"]", reasoning_content)
    return hallucination, reasoning
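# Illustrative example of how a well-formed completion is parsed:
#   parse_patronus_lynx_response(
#       '{"REASONING": ["The answer adds facts not in the document."], "SCORE": "FAIL"}'
#   )
#   -> (True, ['"The answer adds facts not in the document."'])
# (the stray outer quotes around the bullets are stripped later, in model_call)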
def model_call(question, document, answer, client_base_url):
    if question == "" or document == "" or answer == "":
        return "", ""
    client = openai.OpenAI(
        base_url=client_base_url,
        api_key=LEPTON_API_TOKEN
    )
    print("CLIENT AND CLIENT BASE URL", client, client_base_url)
    NEW_FORMAT = PROMPT.format(question=question, document=document, answer=answer)
    print("ENTIRE NEW_FORMAT", NEW_FORMAT)
    response = client.completions.create(
        # The `model` field is required by the OpenAI client; the Lepton endpoint
        # behind client_base_url determines which Lynx model actually runs.
        model="gpt-3.5-turbo-instruct",
        prompt=NEW_FORMAT,
        temperature=0.0
    )
    print("RESPONSE FROM CLIENT:", response)
    hallucination, reasoning = parse_patronus_lynx_response(response.choices[0].text)
    score = "FAIL" if hallucination else "PASS"
    # Guard against a missing REASONING block; the [1:-1] slice strips the stray
    # outer quotes left over from splitting the bullet list.
    combined_reasoning = " ".join(reasoning)[1:-1] if reasoning else ""
    return combined_reasoning, score
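# Illustrative usage (assumes LEPTON_API_TOKEN is set and the endpoint is reachable):
#   reasoning, score = model_call(
#       "Who wrote Hamlet?",
#       "Hamlet is a tragedy written by William Shakespeare.",
#       "Shakespeare wrote Hamlet.",
#       update_client_base_url("Patronus Lynx 8B v1.1"),
#   )
#   # expected: score == "PASS", reasoning holds the model's bullet points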
def get_filetype(filename):
    # Normalize case so e.g. "report.PDF" still matches the "pdf" branch below
    return filename.split(".")[-1].lower()
def extract_text_pymupdf(file):
    with pymupdf.open(file) as pdf_or_txt:
        text = ""
        for page in pdf_or_txt:
            text += page.get_text()
    return text
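# PyMuPDF infers the document type from the file extension, so the same
# page-iteration loop handles both .pdf and plain-text .txt uploads.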
def extract_text_python_docx(file):
    # Gradio passes a filesystem path string; python-docx's Document accepts
    # a path directly, so no BytesIO wrapping is needed.
    doc = Document(file)
    text = ""
    for paragraph in doc.paragraphs:
        text += paragraph.text + '\n'
    return text.strip()
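# Note: Document.paragraphs only covers body paragraphs; text inside tables,
# headers, and footers is not extracted here.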
def upload_file(filepath):
    extracted_file_text = ""
    if filepath is not None:
        name = Path(filepath).name
        print("FILEPATH & file name", filepath, name)
        print("FILEPATH type & file name type", type(filepath), type(name))
        filetype = get_filetype(name)
        # conditionals for filetype and function call
        if filetype == "pdf" or filetype == "txt":
            extracted_file_text = extract_text_pymupdf(filepath)
        elif filetype == "docx":
            extracted_file_text = extract_text_python_docx(filepath)
        return [gr.UploadButton(visible=False), gr.Group(visible=True), gr.Markdown(f"**Uploaded file:** {name}"), extracted_file_text]
    else:
        return [gr.UploadButton(visible=True, file_count="single", file_types=UPLOADABLE_FILE_TYPES), gr.Group(visible=False), gr.Markdown(""), extracted_file_text]
    # return [gr.UploadButton(visible=False), gr.DownloadButton(label=f"Download {name}", value=filepath, visible=True)]
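# The returned components line up with the outputs wired to u.upload below:
# [u, file_group, file_name, document].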
def reset_buttons():
    return [gr.UploadButton(visible=True, file_count="single", file_types=UPLOADABLE_FILE_TYPES), gr.Group(visible=False), gr.Markdown(""), gr.Textbox(value="")]
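# Same output order as upload_file, matching the outputs wired to c.click below.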
# def download_file():
# return [gr.UploadButton(visible=True), gr.DownloadButton(visible=False)]
# inputs = [
# gr.Textbox(label="Question"),
# gr.Textbox(label="Document"),
# gr.Textbox(label="Answer")
# ]
# outputs = [
# gr.Textbox(label="Reasoning"),
# gr.Textbox(label="Score")
# ]
with gr.Blocks(css=css) as demo:
    base_url_state = gr.State(update_client_base_url("Patronus Lynx 8B v1.1"))
    gr.Markdown(HEADER)
    # gr.Interface(fn=model_call, inputs=inputs, outputs=outputs)
    model_dropdown = gr.Dropdown(choices=["Patronus Lynx 8B v1.1", "Patronus Lynx 70B"], value="Patronus Lynx 8B v1.1", label="Model", interactive=True)
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Row():
                question = gr.Textbox(label="Question")
            with gr.Row():
                document = gr.Textbox(label="Document", scale=9)
                u = gr.UploadButton("Upload", visible=True, file_count="single", file_types=UPLOADABLE_FILE_TYPES, scale=1)
            file_group = gr.Group(visible=False)
            with file_group:
                file_name = gr.Markdown("")
                c = gr.ClearButton([u, file_name])
                # d = gr.DownloadButton("Download the file", visible=False, scale=1)
            with gr.Row():
                answer = gr.Textbox(label="Answer")
            with gr.Row():
                clear_btn = gr.ClearButton([question, document, answer])
                submit_button = gr.Button("Submit")
        with gr.Column(scale=1):
            reasoning = gr.Textbox(label="Reasoning")
            score = gr.Textbox(label="Score (FAIL if Hallucinated, PASS if not)")
    model_dropdown.change(fn=update_client_base_url, inputs=[model_dropdown], outputs=[base_url_state])
    u.upload(upload_file, u, [u, file_group, file_name, document])
    c.click(reset_buttons, None, [u, file_group, file_name, document])
    # d.click(download_file, None, [u, d])
    submit_button.click(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
    question.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
    document.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])
    answer.submit(fn=model_call, inputs=[question, document, answer, base_url_state], outputs=[reasoning, score])

demo.launch()