# -*- coding: utf-8 -*-
"""LLM Comparison
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/156SKaX3DY6jwOhcpwZVM5AiLscOAbNNJ
"""
# Commented out IPython magic to ensure Python compatibility.
# %pip install -qU pixeltable gradio sentence-transformers tiktoken openai openpyxl
import gradio as gr
import pandas as pd
import pixeltable as pxt
from pixeltable.iterators import DocumentSplitter
import numpy as np
from pixeltable.functions.huggingface import sentence_transformer
from pixeltable.functions import openai
import os
import getpass
"""## Store OpenAI API Key"""
if 'OPENAI_API_KEY' not in os.environ:
os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key:')
"""Pixeltable Set up"""
# Ensure a clean slate for the demo
pxt.drop_dir('rag_demo', force=True)
pxt.create_dir('rag_demo')
# Set up embedding function
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    return sentence_transformer(text, model_id='intfloat/e5-large-v2')
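
# The intfloat/e5 model card recommends "query: "/"passage: " prefixes on input
# text; this demo embeds raw text, which is simpler but may give up a little
# retrieval quality. As an expr_udf, e5_embed can be passed wherever Pixeltable
# expects an embedding expression, e.g. the string_embed argument of
# add_embedding_index() below.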
# Create prompt function
@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
    concat_top_k = '\n\n'.join(
        elt['text'] for elt in reversed(top_k_list)
    )
    return f'''
PASSAGES:
{concat_top_k}
QUESTION:
{question}'''
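
# Example rendering, for top_k_list=[{'text': 'A'}, {'text': 'B'}] and
# question='Q' -- reversed() puts the most similar passage last, nearest the
# question:
#
#   PASSAGES:
#   B
#
#   A
#   QUESTION:
#   Q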
"""Gradio Application"""
def process_files(ground_truth_file, pdf_files):
    # Import the ground-truth queries; the table path must be a string
    if ground_truth_file.name.endswith('.csv'):
        queries_t = pxt.io.import_csv('rag_demo.queries', ground_truth_file.name)
    else:
        queries_t = pxt.io.import_excel('rag_demo.queries', ground_truth_file.name)

    # Process PDF files
    documents_t = pxt.create_table(
        'rag_demo.documents',
        {'document': pxt.DocumentType()}
    )
    for pdf_file in pdf_files:
        documents_t.insert([{'document': pdf_file.name}])

    # Create chunks view
    chunks_t = pxt.create_view(
        'rag_demo.chunks',
        documents_t,
        iterator=DocumentSplitter.create(
            document=documents_t.document,
            separators='token_limit',
            limit=300
        )
    )
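
    # DocumentSplitter emits one row per chunk and exposes the chunk text in a
    # 'text' column, which the embedding index below is built over.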

    # Add embedding index
    chunks_t.add_embedding_index('text', string_embed=e5_embed)

    # Create top_k query
    @chunks_t.query
    def top_k(query_text: str):
        sim = chunks_t.text.similarity(query_text)
        return (
            chunks_t.order_by(sim, asc=False)
            .select(chunks_t.text, sim=sim)
            .limit(5)
        )
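
    # The registered query behaves like a table method: it is applied row-wise
    # below as chunks_t.top_k(queries_t.Question), and query_llm() later calls
    # it with a plain string and collects the result.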

    # Add computed columns to queries_t
    queries_t['question_context'] = chunks_t.top_k(queries_t.Question)
    queries_t['prompt'] = create_prompt(
        queries_t.question_context, queries_t.Question
    )

    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': queries_t.prompt
        }
    ]
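
    # The user message 'content' is a Pixeltable expression (queries_t.prompt),
    # so the chat completion below is evaluated once per row of queries_t.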

    # Add OpenAI response column, then extract the answer text from it
    queries_t['response'] = openai.chat_completions(
        model='gpt-4-0125-preview', messages=messages
    )
    queries_t['answer'] = queries_t.response.choices[0].message.content

    return "Files processed successfully!"

def query_llm(question):
    queries_t = pxt.get_table('rag_demo.queries')
    chunks_t = pxt.get_table('rag_demo.chunks')

    # Perform top-k lookup
    context = chunks_t.top_k(question).collect()

    # Create prompt
    prompt = create_prompt(context, question)

    # Prepare messages for OpenAI
    messages = [
        {
            'role': 'system',
            'content': 'Please read the following passages and answer the question based on their contents.'
        },
        {
            'role': 'user',
            'content': prompt
        }
    ]

    # Get LLM response
    response = openai.chat_completions(model='gpt-4-0125-preview', messages=messages)
    answer = response.choices[0].message.content

    # Add new row to queries_t
    new_row = {'Question': question, 'answer': answer}
    queries_t.insert([new_row])

    # Return updated dataframe
    return queries_t.select(queries_t.Question, queries_t.answer).collect()
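
# Note (assumption): 'answer' is defined in process_files() as a computed
# column, and some Pixeltable versions reject inserting a literal value into a
# computed column. If the insert above fails, insert only
# {'Question': question} and let the computed columns (question_context,
# prompt, response, answer) populate the new row.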

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# RAG Demo App")
    with gr.Row():
        ground_truth_file = gr.File(label="Upload Ground Truth (CSV or XLSX)", file_count="single")
        pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")
    process_button = gr.Button("Process Files")
    process_output = gr.Textbox(label="Processing Output")
    question_input = gr.Textbox(label="Enter your question")
    query_button = gr.Button("Query LLM")
    output_dataframe = gr.Dataframe(label="LLM Outputs")

    process_button.click(process_files, inputs=[ground_truth_file, pdf_files], outputs=process_output)
    query_button.click(query_llm, inputs=question_input, outputs=output_dataframe)

if __name__ == "__main__":
    demo.launch()