import streamlit as st
from PIL import Image
import google.generativeai as genai
import os

MODEL_ID = "gemini-2.0-flash-exp"

api_key = os.getenv("GEMINI_API_KEY")
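# Optional guard (not part of the original app): fail fast with a readable
# message if GEMINI_API_KEY is missing, instead of letting the first API call
# raise an authentication error.
if not api_key:
    st.error("GEMINI_API_KEY environment variable is not set.")
    st.stop()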
model_id = MODEL_ID
genai.configure(api_key=api_key)

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

if "model" not in st.session_state:
    st.session_state.model = genai.GenerativeModel(MODEL_ID)

model = st.session_state.model
# Function to reset chat history
def reset_chat():
    st.session_state.messages = []
    model.start_chat()  # starts a fresh chat session; the returned ChatSession is not used here
# Streamlit app
st.title("Gemini Image Chat")

# File uploader with allowed types
uploaded_file = st.file_uploader("Choose an image or PDF...", type=["jpg", "jpeg", "png", "pdf"])
if uploaded_file is not None:
    # Determine file type
    file_type = uploaded_file.type

    if file_type.startswith("image"):
        # Display the uploaded image
        image = Image.open(uploaded_file)
        st.image(image, caption="Uploaded Image.", use_container_width=True)
        mime_type = file_type  # keep the file's actual MIME type (image/jpeg or image/png)
    elif file_type == "application/pdf":
        # Display a message for PDF upload
        st.write("PDF file uploaded. You can ask questions about its content.")
        mime_type = "application/pdf"
    else:
        st.error("Unsupported file type. Please upload an image or PDF.")
        st.stop()
    # Reset chat history only when a new file is uploaded; an unconditional
    # reset here would clear the history on every Streamlit rerun.
    # ("last_uploaded_name" is a session key added for this check.)
    if st.session_state.get("last_uploaded_name") != uploaded_file.name:
        reset_chat()
        st.session_state.last_uploaded_name = uploaded_file.name
    # Text input for user prompt
    user_input = st.text_input("Enter your prompt:")

    # Send button
    if st.button("Send"):
        if user_input:
            # Add user message to chat history
            st.session_state.messages.append({"role": "user", "content": user_input})

            # Display chat history
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])
            with st.spinner("Processing..."):
                # Rewind the upload buffer (PIL may have read it above), then
                # upload the file with the correct MIME type
                uploaded_file.seek(0)
                file_data = genai.upload_file(uploaded_file, mime_type=mime_type)
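                # Optional (not in the original app): larger uploads can stay in a
                # PROCESSING state briefly; poll the File API until the file is
                # ACTIVE before asking the model about it.
                import time
                while file_data.state.name == "PROCESSING":
                    time.sleep(1)
                    file_data = genai.get_file(file_data.name)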
                # Send file and prompt to Gemini API
                response = model.generate_content(
                    [
                        user_input,
                        file_data
                    ]
                )
                # Add Gemini response to chat history
                st.session_state.messages.append({"role": "assistant", "content": response.text})

                # Display Gemini response
                with st.chat_message("assistant"):
                    st.markdown(response.text)
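To try this locally (assuming the script is saved as app.py, a filename chosen here for illustration), install streamlit, pillow, and google-generativeai, export GEMINI_API_KEY with a valid Gemini API key, and start the app with "streamlit run app.py".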