# -*- coding: utf-8 -*-
"""LLM Comparison
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/156SKaX3DY6jwOhcpwZVM5AiLscOAbNNJ
"""
# Commented out IPython magic to ensure Python compatibility.
# %pip install -qU pixeltable gradio sentence-transformers tiktoken openai openpyxl
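# Note: openpyxl is what backs the Excel (.xlsx) ground-truth import below.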
import gradio as gr
import pandas as pd
import pixeltable as pxt
from pixeltable.iterators import DocumentSplitter
import numpy as np
from pixeltable.functions.huggingface import sentence_transformer
from pixeltable.functions import openai
import os
import getpass
"""## Store OpenAI API Key"""
if 'OPENAI_API_KEY' not in os.environ:
    os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')
"""Pixeltable Set up"""
# Ensure a clean slate for the demo
pxt.drop_dir('rag_demo', force=True)
pxt.create_dir('rag_demo')
# Set up embedding function
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    return sentence_transformer(text, model_id='intfloat/e5-large-v2')
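# e5_embed wraps the E5 sentence-transformer so it can be plugged into the
# embedding index created below via its string_embed parameter.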
# Create prompt function
@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
    concat_top_k = '\n\n'.join(
        elt['text'] for elt in reversed(top_k_list)
    )
    return f'''
    PASSAGES:

    {concat_top_k}

    QUESTION:

    {question}'''
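# Note: reversed() puts the highest-similarity passage last, i.e. closest to
# the question in the assembled prompt.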
"""Gradio Application"""
def process_files(ground_truth_file, pdf_files):
    # Process ground truth file (CSV or XLSX) into a Pixeltable table
    if ground_truth_file.name.endswith('.csv'):
        queries_t = pxt.io.import_csv('rag_demo.queries', ground_truth_file.name)
    else:
        queries_t = pxt.io.import_excel('rag_demo.queries', ground_truth_file.name)
    # Process PDF files
    documents_t = pxt.create_table(
        'rag_demo.documents',
        {'document': pxt.DocumentType()}
    )
    for pdf_file in pdf_files:
        # insert() expects a list of row dicts (matching the usage in query_llm)
        documents_t.insert([{'document': pdf_file.name}])
    # Create chunks view: split each document into ~300-token chunks
    chunks_t = pxt.create_view(
        'rag_demo.chunks',
        documents_t,
        iterator=DocumentSplitter.create(
            document=documents_t.document,
            separators='token_limit',
            limit=300
        )
    )
    # Add embedding index
    chunks_t.add_embedding_index('text', string_embed=e5_embed)
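    # The index embeds every chunk with e5-large-v2 and stays in sync as rows
    # are inserted, which is what makes the .similarity() call below work.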
    # Create top_k query
    @chunks_t.query
    def top_k(query_text: str):
        sim = chunks_t.text.similarity(query_text)
        return (
            chunks_t.order_by(sim, asc=False)
            .select(chunks_t.text, sim=sim)
            .limit(5)
        )
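    # top_k is registered on the chunks table, so it can be used both as a
    # computed-column expression (below) and called directly in query_llm
    # after re-fetching the table.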
    # Add computed columns to queries_t
    queries_t['question_context'] = chunks_t.top_k(queries_t.Question)
    queries_t['prompt'] = create_prompt(
        queries_t.question_context, queries_t.Question
    )
    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': queries_t.prompt
        }
    ]
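    # queries_t.prompt is a column reference, so these messages are evaluated
    # per row when the response column is computed below.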
    # Add OpenAI response column
    queries_t['response'] = openai.chat_completions(
        model='gpt-4-0125-preview', messages=messages
    )
    queries_t['answer'] = queries_t.response.choices[0].message.content

    return "Files processed successfully!"

def query_llm(question):
    queries_t = pxt.get_table('rag_demo.queries')
    chunks_t = pxt.get_table('rag_demo.chunks')
    # Perform top-k lookup
    context = chunks_t.top_k(question).collect()

    # Create prompt
    prompt = create_prompt(context, question)
    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': prompt
        }
    ]
    # Get LLM response
    response = openai.chat_completions(model='gpt-4-0125-preview', messages=messages)
    answer = response.choices[0].message.content

    # Add new row to queries_t
    new_row = {'Question': question, 'answer': answer}
    queries_t.insert([new_row])

    # Return updated dataframe
    return queries_t.select(queries_t.Question, queries_t.answer).collect()
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# RAG Demo App")

    with gr.Row():
        ground_truth_file = gr.File(label="Upload Ground Truth (CSV or XLSX)", file_count="single")
        pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")

    process_button = gr.Button("Process Files")
    process_output = gr.Textbox(label="Processing Output")

    question_input = gr.Textbox(label="Enter your question")
    query_button = gr.Button("Query LLM")
    output_dataframe = gr.Dataframe(label="LLM Outputs")

    process_button.click(process_files, inputs=[ground_truth_file, pdf_files], outputs=process_output)
    query_button.click(query_llm, inputs=question_input, outputs=output_dataframe)
if __name__ == "__main__":
    demo.launch()