import configparser
import os

import openai
import streamlit as st

from utils import load_base_prompt  # expected to return the system prompt string used below

# Load the OpenAI API key from config.ini.
config = configparser.ConfigParser()
config.read('config.ini')
open_ai_key = config.get('access', 'openai_key')
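
# Expected config.ini layout, inferred from the section and key read above:
#
#   [access]
#   openai_key = <your OpenAI API key>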
# Avoid printing the secret itself; show only a masked suffix as a startup sanity check.
print(f"OpenAI key loaded: ...{open_ai_key[-4:]}")
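
# Inline CSS that sets the top padding of Streamlit's main content container to 1.3rem.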
hide_streamlit_style = """
<style>
#root > div:nth-child(1) > div > div > div > div > section > div {padding-top: 1.3rem;}
</style>
"""
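
# Inline CSS that sets the bottom padding of the floating chat input container to 1rem.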
st.markdown(
    """
    <style>
    .stChatFloatingInputContainer {padding-bottom: 1rem;}
    </style>
    """,
    unsafe_allow_html=True,
)

st.title("Forher AI Chatbot")
st.markdown(hide_streamlit_style, unsafe_allow_html=True)

openai.api_key = open_ai_key
base_prompt = load_base_prompt()
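
# Initialise per-session state: the model to call and the running chat history.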
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4-1106-preview"

if "messages" not in st.session_state:
    st.session_state.messages = []
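
# Re-render the stored conversation on every rerun so the history stays visible.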
for message in st.session_state.messages:
    avatar = "🧑‍💻" if message["role"] == "user" else "🤖"
    with st.chat_message(message["role"], avatar=avatar):
        st.markdown(message["content"])
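
# Handle a new user message: store it, echo it, then stream the assistant's reply.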
if prompt := st.chat_input("Ask your question here"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar="🧑‍💻"):
        st.markdown(prompt)

    with st.chat_message("assistant", avatar="🤖"):
        message_placeholder = st.empty()
        full_response = ""
        # Stream the reply with the pre-1.0 openai SDK, prepending the system prompt to the stored history.
        for response in openai.ChatCompletion.create(
            model=st.session_state["openai_model"],
            messages=[{"role": "system", "content": base_prompt}]
            + [
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        ):
            full_response += response.choices[0].delta.get("content", "")
            message_placeholder.markdown(full_response + "▌")

        message_placeholder.markdown(full_response)

    st.session_state.messages.append({"role": "assistant", "content": full_response})