|
""" |
|
Install the Google AI Python SDK |
|
|
|
$ pip install google-generativeai |
|
|
|
See the getting started guide for more information: |
|
https://ai.google.dev/gemini-api/docs/get-started/python |
|
""" |
|
|
|
import os |
|
import time |
|
|
|
import google.generativeai as genai |
|
|
|
# Authenticate the SDK; raises KeyError at import time if GEMINI_API_KEY is unset.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
|
|
|
def upload_to_gemini(path, mime_type=None):
    """Upload a local file to the Gemini Files API and return its handle.

    See https://ai.google.dev/gemini-api/docs/prompting_with_media

    Args:
        path: Path of the file to upload.
        mime_type: Optional MIME type hint passed through to the API.

    Returns:
        The uploaded-file object returned by ``genai.upload_file``.
    """
    uploaded = genai.upload_file(path, mime_type=mime_type)
    print(f"Uploaded file '{uploaded.display_name}' as: {uploaded.uri}")
    return uploaded
|
|
|
def wait_for_files_active(files, poll_interval=10):
    """Block until every uploaded file reaches the ACTIVE state.

    Some files uploaded to the Gemini API need to be processed before they
    can be used as prompt inputs; progress is exposed via the file's
    ``state`` field.  This implementation uses a simple blocking polling
    loop.  Production code should probably employ a more sophisticated
    approach.

    Args:
        files: Iterable of file handles as returned by ``genai.upload_file``.
        poll_interval: Seconds to sleep between state checks.  Defaults to
            10, matching the previously hard-coded delay.

    Raises:
        RuntimeError: If any file ends in a state other than ACTIVE
            (e.g. FAILED).
    """
    print("Waiting for file processing...")
    for name in (file.name for file in files):
        # Re-fetch the file each pass so we see the server-side state.
        file = genai.get_file(name)
        while file.state.name == "PROCESSING":
            print(".", end="", flush=True)
            time.sleep(poll_interval)
            file = genai.get_file(name)
        if file.state.name != "ACTIVE":
            # Any terminal state other than ACTIVE means processing failed.
            # RuntimeError instead of bare Exception; still caught by
            # callers using `except Exception`.
            raise RuntimeError(f"File {file.name} failed to process")
    print("...all files ready")
    print()
|
|
|
|
|
|
|
# Decoding parameters applied to every request made with this model.
generation_config = dict(
    temperature=1,
    top_p=0.95,
    top_k=64,
    max_output_tokens=8192,
    response_mime_type="text/plain",
)

# Gemini 1.5 Pro, primed via the system instruction to answer as an
# economist and to admit when it does not know.
model = genai.GenerativeModel(
    model_name="gemini-1.5-pro",
    generation_config=generation_config,
    system_instruction="You are an expert economist. Provide concise answers and if you do not know, just say you do not know.",
)
|
|
|
|
|
|
|
# --- One-shot script flow ---
# NOTE(review): this block executes at import time, so it uploads both PDFs
# and sends a message even when the file is used via main() below — consider
# guarding it.  Both PDFs must exist in the working directory.
files = [
    upload_to_gemini("2024_25_Annex_Budget.pdf", mime_type="application/pdf"),
    upload_to_gemini("2024_25_Budget_Speech.pdf", mime_type="application/pdf"),
]

# Block until the uploaded PDFs are processed and usable as prompt inputs.
wait_for_files_active(files)

# Start a fresh chat session with no prior history.
chat_session = model.start_chat(
    history=[ ]
)

# NOTE(review): "INSERT_INPUT_HERE" looks like a leftover template
# placeholder — replace it with a real prompt before running.
response = chat_session.send_message("INSERT_INPUT_HERE")

print(response.text)
|
|
|
|
|
def main():
    """Streamlit entry point: PDF-upload sidebar plus a chat UI.

    NOTE(review): ``st`` (Streamlit) is never imported in this file as
    shown, and the helpers ``get_pdf_text``, ``get_text_chunks``,
    ``get_vector_store``, ``clear_chat_history`` and ``user_input`` are not
    defined here — presumably they live in another module or a missing
    import block; confirm before running.
    """
    st.set_page_config(
        page_title="Budget 2024-2025 Chatbot",
        page_icon="🤖"
    )

    # Sidebar: upload PDFs, extract their text, chunk it, and index it.
    with st.sidebar:
        st.title("Menu:")
        pdf_docs = st.file_uploader(
            "Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
        if st.button("Submit & Process"):
            with st.spinner("Processing..."):
                raw_text = get_pdf_text(pdf_docs)
                text_chunks = get_text_chunks(raw_text)
                get_vector_store(text_chunks)
                st.success("Done")

    # Main area: page title plus a sidebar button to reset the conversation.
    st.title("Chat with PDF files using Gemini🤖")
    st.write("Welcome to the chat!")
    st.sidebar.button('Clear Chat History', on_click=clear_chat_history)

    # Seed the conversation on the first run of this session.
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [
            {"role": "assistant", "content": "upload some pdfs and ask me a question"}]

    # Re-render the full history on every Streamlit rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # Record and echo the user's new prompt, if one was entered.
    if prompt := st.chat_input():
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

    # Generate a reply only when the last message came from the user.
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                response = user_input(prompt)
                placeholder = st.empty()
                full_response = ''
                # Stream the answer piece by piece into the placeholder.
                for item in response['output_text']:
                    full_response += item
                    placeholder.markdown(full_response)
                placeholder.markdown(full_response)
        if response is not None:
            message = {"role": "assistant", "content": full_response}
            st.session_state.messages.append(message)
|
|
|
|
|
# Entry guard; note the module-level Gemini calls above run on import
# regardless of this guard.
if __name__ == "__main__":
    main()