import os
import openai
import streamlit as st
from utils import load_base_prompt
import configparser
# Load the OpenAI API key from the local config.ini file.
# Expected layout:
#   [access]
#   openai_key = sk-...
config = configparser.ConfigParser()
config.read('config.ini')
open_ai_key = config.get('access', 'openai_key')
# SECURITY: never print the full secret — it would leak into logs/stdout.
# Log only a masked suffix so a run can still be traced to a key.
print(f"Openai key: ****{open_ai_key[-4:]}")
# hack from: https://discuss.streamlit.io/t/remove-ui-top-bar-forehead/22071/3
# NOTE(review): the CSS payload below is currently empty, so both markdown
# calls are no-ops; restore the <style> rules here to actually hide the
# Streamlit top bar / padding. Kept in place so the hook point is obvious.
hide_streamlit_style = """
"""

# remove some padding bottom (CSS body currently empty — no visual effect)
st.markdown(
"""
""",
unsafe_allow_html=True,
)

st.title("Forher AI Genie")
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Configure the OpenAI client with the key read from config.ini above.
openai.api_key = open_ai_key

# System prompt that frames every conversation (loaded via utils helper).
base_prompt = load_base_prompt()

# Pin the model once per browser session; Streamlit reruns the whole
# script on every interaction, so session_state guards the one-time init.
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4-1106-preview"

# Chat transcript for this session: list of {"role": ..., "content": ...}.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Redraw the transcript so far — Streamlit reruns the script on every
# interaction, so prior turns must be re-rendered each time.
for past in st.session_state.messages:
    is_user = past["role"] == "user"
    with st.chat_message(past["role"], avatar="👤" if is_user else "🤖"):
        st.markdown(past["content"])
# Handle a new user turn: store it, echo it, stream the assistant reply,
# then persist the reply so it survives the next rerun.
if prompt := st.chat_input("Ask your question here"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Use the same "👤" avatar as the history loop above — previously this
    # used "🧑💻", so a message changed avatar after the next rerun.
    with st.chat_message("user", avatar="👤"):
        st.markdown(prompt)
    with st.chat_message("assistant", avatar="🤖"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream tokens so the reply appears incrementally. The system
        # prompt is prepended on every call; it is never stored in history.
        for response in openai.ChatCompletion.create(
            model=st.session_state["openai_model"],
            messages=[{"role": "system", "content": base_prompt}]
            + [
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        ):
            # A streamed chunk may or may not carry a content delta.
            full_response += response.choices[0].delta.get("content", "")
            message_placeholder.markdown(full_response + "▌")  # typing cursor
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})