# talktome / app.py
import os
import openai
import streamlit as st
from utils import load_base_prompt
import configparser
# Create a ConfigParser object
config = configparser.ConfigParser()
# Read the config.ini file
config.read('config.ini')
# Access the OpenAI API key
open_ai_key = config.get('access', 'openai_key')
# Avoid logging the key itself; just confirm it was loaded
print(f"OpenAI key loaded: {bool(open_ai_key)}")
# hack from: https://discuss.streamlit.io/t/remove-ui-top-bar-forehead/22071/3
hide_streamlit_style = """
<style>
#root > div:nth-child(1) > div > div > div > div > section > div {padding-top: 1.3rem;}
</style>
"""
# Remove some padding below the chat input container
st.markdown(
"""
<style>
.stChatFloatingInputContainer {padding-bottom: 1rem;}
</style>
""",
unsafe_allow_html=True,
)
# remove some padding in between
# st.markdown(
# """
# <style>
# .block-container.css-1y4p8pa.ea3mdgi4 {padding-bottom: .3rem;}
# </style>
# """,
# unsafe_allow_html=True,
# )
# st.markdown(
# """
# <style>
# .block-container.st-emotion-cache-1y4p8pa.ea3mdgi4 {padding-bottom: .5rem;}
# </style>
# """,
# unsafe_allow_html=True,
# )
st.title("Forher AI Chatbot")
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#openai.api_key = os.environ.get("open_ai_key")
openai.api_key = open_ai_key
base_prompt = load_base_prompt()
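# load_base_prompt() comes from the bundled utils.py (not shown here); its
# exact implementation is an assumption, but it presumably just reads the
# system prompt from a local text file, roughly:
#
#   def load_base_prompt(path="base_prompt.txt"):  # hypothetical filename
#       with open(path, "r", encoding="utf-8") as f:
#           return f.read()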
if "openai_model" not in st.session_state:
#st.session_state["openai_model"] = "gpt-3.5-turbo"
#st.session_state["openai_model"] = "gpt-4"
st.session_state["openai_model"] = "gpt-4-1106-preview"
if "messages" not in st.session_state:
st.session_state.messages = []
# Replay the conversation history on every Streamlit rerun
for message in st.session_state.messages:
    avatar = "πŸ‘€" if message["role"] == "user" else "πŸ€–"
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])
if prompt := st.chat_input("Ask your question here"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar="πŸ§‘β€πŸ’»"):
        st.markdown(prompt)

    with st.chat_message("assistant", avatar="πŸ€–"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream the completion and render it incrementally, with a cursor
        for response in openai.ChatCompletion.create(
            model=st.session_state["openai_model"],
            messages=[{"role": "system", "content": base_prompt}]
            + [
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        ):
            full_response += response.choices[0].delta.get("content", "")
            message_placeholder.markdown(full_response + "β–Œ")
        message_placeholder.markdown(full_response)
    st.session_state.messages.append({"role": "assistant", "content": full_response})
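# To try this locally (a sketch of the expected setup, not verified against the
# rest of the repo): put the key in config.ini as shown above, keep utils.py
# and the base prompt file next to app.py, then run:
#
#   streamlit run app.py
#
# Note that openai.ChatCompletion is the pre-1.0 openai-python interface, so
# this script assumes an `openai<1.0` release is installed.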