import os
import gradio as gr
import asyncio
from langchain_core.prompts import PromptTemplate
from langchain_community.document_loaders import PyPDFLoader
from langchain_google_genai import ChatGoogleGenerativeAI
import google.generativeai as genai
from langchain.chains.question_answering import load_qa_chain
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
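# Assumed dependency set for this script (package names are a best guess; pin versions as needed):
#   pip install gradio langchain langchain-community langchain-google-genai \
#       google-generativeai pypdf torch transformers accelerate
# The Gemini calls below expect GOOGLE_API_KEY to be set in the environment.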

# Gemini PDF QA System
async def initialize(file_path, question):
    genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
    model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
    prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is
                          not contained in the context, say "answer not available in context" \n\n
                          Context: \n {context} \n
                          Question: \n {question} \n
                          Answer:
                        """
    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
    if os.path.exists(file_path):
        pdf_loader = PyPDFLoader(file_path)
        pages = pdf_loader.load_and_split()
        # The "stuff" chain fills the prompt's {context} variable from input_documents itself,
        # so pass only the first 30 pages rather than a separately joined context string.
        stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
        # Use ainvoke (the chain is not itself awaitable) and return only the chain outputs
        stuff_answer = await stuff_chain.ainvoke(
            {"input_documents": pages[:30], "question": question},
            return_only_outputs=True,
        )
        return stuff_answer["output_text"]
    else:
        return "Error: Unable to process the document. Please ensure the PDF file is valid."

async def pdf_qa(file, question):
    # gr.File may yield a plain path string or a tempfile-like object, depending on the Gradio version
    file_path = file if isinstance(file, str) else file.name
    return await initialize(file_path, question)
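
# Quick sanity check outside Gradio (hypothetical file name and question; GOOGLE_API_KEY must be set):
#   print(asyncio.run(pdf_qa("sample.pdf", "What is the document about?")))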

# Mistral Text Completion
_mistral_cache = None

def load_mistral_model():
    # Load the 8B model once and reuse it; reloading on every request would be prohibitively slow
    global _mistral_cache
    if _mistral_cache is None:
        model_path = "nvidia/Mistral-NeMo-Minitron-8B-Base"
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map=device)
        _mistral_cache = (tokenizer, model)
    return _mistral_cache

def generate_text(prompt, max_new_tokens=50):
    tokenizer, model = load_mistral_model()
    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    # max_new_tokens bounds the completion itself; max_length would also count the prompt tokens
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
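
# Example completion call (hypothetical prompt; the first call downloads and loads the 8B model):
#   print(generate_text("The capital of France is", max_new_tokens=20))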

# Gradio Interface
input_file = gr.File(label="Upload PDF File")
input_question = gr.Textbox(label="Ask about the document")
output_text_gemini = gr.Textbox(label="Answer - GeminiPro")
input_prompt = gr.Textbox(label="Enter prompt for text completion")
output_text_mistral = gr.Textbox(label="Completed Text - Mistral")

def pdf_qa_wrapper(file, question):
    # Bridge the async QA pipeline into Gradio's synchronous callback
    return asyncio.run(pdf_qa(file, question))

# gr.Interface accepts a single fn, not a list, so expose the two tools as tabs
pdf_qa_interface = gr.Interface(
    fn=pdf_qa_wrapper,
    inputs=[input_file, input_question],
    outputs=output_text_gemini,
    description="Upload a PDF file and ask questions about its content.",
)

mistral_interface = gr.Interface(
    fn=generate_text,
    inputs=input_prompt,
    outputs=output_text_mistral,
    description="Enter a prompt for text completion.",
)

iface = gr.TabbedInterface(
    [pdf_qa_interface, mistral_interface],
    tab_names=["PDF QA - Gemini", "Text Completion - Mistral"],
    title="Combined PDF QA and Text Completion System",
)

iface.launch()