import streamlit as st
import google.generativeai as genai
from PIL import Image
import io
import base64
import pandas as pd
import zipfile
import PyPDF2
st.set_page_config(page_title="Gemini AI Chat", layout="wide")
st.title("🤖 Gemini AI Chat Interface")
st.markdown("""
**Welcome to the Gemini AI Chat Interface!**
Chat seamlessly with Google's advanced Gemini AI models, supporting multiple input types.
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
""")
# Session State Management
if "messages" not in st.session_state:
    st.session_state.messages = []
if "uploaded_content" not in st.session_state:
    st.session_state.uploaded_content = None
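# Note: Streamlit re-runs this script on every interaction, so keeping the chat history
# and the last uploaded file in st.session_state is what makes them persist between turns.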
# File Processing Functions
def encode_image(image):
    buffered = io.BytesIO()
    image.save(buffered, format="JPEG")
    return base64.b64encode(buffered.getvalue()).decode('utf-8')
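# The image is re-encoded as JPEG and base64 here because the request below sends it as an
# inline_data part declared as "image/jpeg"; converting to RGB first (see process_file)
# avoids save errors, since JPEG has no alpha channel.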
def process_file(uploaded_file):
    file_type = uploaded_file.name.split('.')[-1].lower()
    if file_type in ["jpg", "jpeg", "png"]:
        return {"type": "image", "content": Image.open(uploaded_file).convert('RGB')}
    code_extensions = ["html", "css", "php", "js", "py", "java", "c", "cpp"]
    if file_type in ["txt"] + code_extensions:
        return {"type": "text", "content": uploaded_file.read().decode("utf-8")}
    if file_type in ["csv", "xlsx"]:
        df = pd.read_csv(uploaded_file) if file_type == "csv" else pd.read_excel(uploaded_file)
        return {"type": "text", "content": df.to_string()}
    if file_type == "pdf":
        reader = PyPDF2.PdfReader(uploaded_file)
        return {"type": "text", "content": "".join(page.extract_text() for page in reader.pages if page.extract_text())}
    if file_type == "zip":
        with zipfile.ZipFile(uploaded_file) as z:
            # Backslashes are not allowed inside f-string expressions (before Python 3.12),
            # so the newline is defined outside the f-string.
            newline = "\n"
            return {"type": "text", "content": f"ZIP Contents:{newline}{newline.join(z.namelist())}"}
    return {"type": "error", "content": "Unsupported file format"}
# Sidebar Configuration
with st.sidebar:
    api_key = st.text_input("Google AI API Key", type="password")
    model = st.selectbox("Model", [
        "gemini-1.5-flash",
        "gemini-1.5-pro",
        "gemini-1.5-flash-8B",
        "gemini-1.5-pro-vision-latest",
        "gemini-1.0-pro",
        "gemini-1.0-pro-vision-latest",
        "gemini-2.0-pro-exp-02-05",
        "gemini-2.0-flash-lite",
        "gemini-2.0-flash-exp-image-generation",
        "gemini-2.0-flash",
        "gemini-2.0-flash-thinking-exp-01-21"
    ])
    temperature = st.slider("Temperature", 0.0, 1.0, 0.7)
    max_tokens = st.slider("Max Tokens", 1, 2048, 1000)
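    # Both values are forwarded to genai.types.GenerationConfig when the reply is
    # generated below; max_tokens caps the model's output length, not the input.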
    # File Upload Section
    uploaded_file = st.file_uploader("Upload File (Image/Text/PDF/ZIP)",
                                     type=["jpg", "jpeg", "png", "txt", "pdf", "zip",
                                           "csv", "xlsx", "html", "css", "php", "js", "py"])
    if uploaded_file:
        processed = process_file(uploaded_file)
        st.session_state.uploaded_content = processed
        if processed["type"] == "image":
            st.image(processed["content"], caption="Uploaded Image", use_container_width=True)
        elif processed["type"] == "text":
            st.text_area("File Preview", processed["content"], height=200)
# Chat History Display
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat Input Processing
if prompt := st.chat_input("Your message..."):
    if not api_key:
        st.warning("API key required!")
        st.stop()
    try:
        # Configure Gemini
        genai.configure(api_key=api_key)
        model_instance = genai.GenerativeModel(model)
        # Build content payload
        content = []
        # Add text input
        content.append({"text": prompt})
        # Add file content
        if st.session_state.uploaded_content:
            if st.session_state.uploaded_content["type"] == "image":
                content.append({
                    "inline_data": {
                        "mime_type": "image/jpeg",
                        "data": encode_image(st.session_state.uploaded_content["content"])
                    }
                })
            elif st.session_state.uploaded_content["type"] == "text":
                content[0]["text"] += f"\n\n[File Content]\n{st.session_state.uploaded_content['content']}"
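        # At this point `content` is a list of Gemini request parts, e.g. (illustrative):
        #   [{"text": "prompt ... [File Content] ..."}]                                  for text uploads
        #   [{"text": "prompt"}, {"inline_data": {"mime_type": "image/jpeg", "data": "<base64>"}}]  for images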
        # Add to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        # Generate response
        response = model_instance.generate_content(
            content,
            generation_config=genai.types.GenerationConfig(
                temperature=temperature,
                max_output_tokens=max_tokens
            )
        )
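        # Note: accessing response.text below can itself raise (e.g. if the reply was
        # blocked by safety filters); the except block treats that as an API error.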
        # Display response
        with st.chat_message("assistant"):
            st.markdown(response.text)
        st.session_state.messages.append({"role": "assistant", "content": response.text})
    except Exception as e:
        st.error(f"API Error: {str(e)}")
        # Guard against uploaded_content being None before inspecting its type
        if "vision" not in model and st.session_state.uploaded_content and st.session_state.uploaded_content["type"] == "image":
            st.error("Select a vision-capable model for image input!")
# Instructions in the sidebar
with st.sidebar:
    st.markdown("""
## 📝 Instructions:
1. Enter your Google AI API key
2. Select a model (use vision models for image analysis)
3. Adjust temperature and max tokens if needed
4. Optional: Set a system prompt
5. Upload an image (optional)
6. Type your message and press Enter
### About
🔗 [GitHub Profile](https://github.com/volkansah) |
📂 [Project Repository](https://github.com/volkansah/gemini-ai-chat) |
💬 [Soon](https://aicodecraft.io)
""")