import os

import streamlit as st
import google.generativeai as genai

genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Create the model
# See https://ai.google.dev/api/python/google/generativeai/GenerativeModel
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
    },
]

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash-latest",
    safety_settings=safety_settings,
    generation_config=generation_config,
)

# Keep the chat session in st.session_state so its history survives Streamlit reruns;
# creating it at module level would start a fresh session on every interaction.
if "chat_session" not in st.session_state:
    st.session_state.chat_session = model.start_chat(history=[])
chat_session = st.session_state.chat_session

st.title("Gemini 1.5")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What is up?"):
    # Display user message in chat message container
    st.chat_message("user").markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Send the prompt to Gemini
    response = chat_session.send_message(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response.text)
    # Add assistant response to chat history (store the text, not the response object)
    st.session_state.messages.append({"role": "assistant", "content": response.text})