Docfile committed on
Commit
d5a6a33
·
verified ·
1 Parent(s): 7c7bca7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -0
app.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import google.generativeai as genai
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ load_dotenv()
7
+ # Configure the API key
8
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
9
+
10
+
11
+ # Initialize the Generative Model
12
+ model = genai.GenerativeModel('gemini-1.5-flash')
13
+
14
+ # Function to get response from the model
15
+ # Gemini uses 'model' for assistant; Streamlit uses 'assistant'
16
+
17
+
18
+ def role_to_streamlit(role):
19
+ if role == "model":
20
+ return "assistant"
21
+ else:
22
+ return role
23
+
24
+
25
+ # Add a Gemini Chat history object to Streamlit session state
26
+ if "chat" not in st.session_state:
27
+ st.session_state.chat = model.start_chat(history=[])
28
+
29
+ # Display Form Title
30
+ st.title("Chat with Google Gemini-1.5-flash!")
31
+
32
+ # Display chat messages from history above current input box
33
+ for message in st.session_state.chat.history:
34
+ with st.chat_message(role_to_streamlit(message.role)):
35
+ st.markdown(message.parts[0].text)
36
+
37
+ # Accept user's next message, add to context, resubmit context to Gemini
38
+ if prompt := st.chat_input("I possess a well of knowledge. What would you like to know?"):
39
+ # Display user's last message
40
+ st.chat_message("user").markdown(prompt)
41
+
42
+ # Send user entry to Gemini and read the response
43
+ response = st.session_state.chat.send_message(prompt)
44
+
45
+ # Display last
46
+ with st.chat_message("assistant"):
47
+ st.markdown(response.text)