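"""Streamlit chatbot backed by the OpenAI chat completions API.

The OpenAI API key is read from Streamlit secrets (OPENAI_API_KEY) or entered
in the sidebar. Run locally with:

    streamlit run streamlit_app.py
"""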
import os

import streamlit as st
from openai import OpenAI

# App title
st.set_page_config(page_title="💬 OpenAI Chatbot")

# OpenAI credentials
with st.sidebar:
    st.title('💬 OpenAI Chatbot')
    st.write('This chatbot is created using GPT models from OpenAI.')
    if 'OPENAI_API_KEY' in st.secrets:
        st.success('API key already provided!', icon='✅')
        openai_api = st.secrets['OPENAI_API_KEY']
    else:
        openai_api = st.text_input('Enter OpenAI API token:', type='password')
    # Only validate the 'sk-' prefix; OpenAI key lengths vary by key type,
    # so a fixed-length check would reject valid keys.
    if not openai_api.startswith('sk-'):
        st.warning('Please enter your credentials!', icon='⚠️')
    else:
        st.success('Proceed to entering your prompt message!', icon='👉')
        os.environ['OPENAI_API_KEY'] = openai_api
    st.subheader('Models and parameters')
    selected_model = st.selectbox(
        'Choose an OpenAI model',
        ['gpt-3.5-turbo-1106', 'gpt-4-1106-preview'],
        key='selected_model',
    )
    temperature = st.slider('temperature', min_value=0.01, max_value=2.0,
                            value=0.1, step=0.01)
    st.markdown('📖 Reach out to Sakimilo to learn how to create this app!')

# Store LLM-generated responses
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant",
                                  "content": "How may I assist you today?"}]

# Display chat messages from history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])


def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [{"role": "assistant",
                                  "content": "How may I assist you today?"}]


st.sidebar.button('Clear Chat History', on_click=clear_chat_history)


def generate_llm_response(client):
    """Stream a chat completion for the conversation in session state.

    The latest user prompt is already appended to st.session_state.messages
    before this is called, so no separate prompt argument is needed.
    """
    system_content = ("You are a helpful assistant. "
                      "You do not respond as 'User' or pretend to be 'User'. "
                      "You only respond once as 'Assistant'.")
    # Prepend the system prompt to the stored history and stream the reply.
    completion = client.chat.completions.create(
        model=selected_model,
        messages=[{"role": "system", "content": system_content}]
                 + st.session_state.messages,
        temperature=temperature,
        stream=True,
    )
    return completion


# User-provided prompt
if prompt := st.chat_input(disabled=not openai_api):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    # OpenAI() reads the key from the OPENAI_API_KEY environment variable set
    # in the sidebar; creating the client here (rather than in the prompt
    # block above) keeps it defined whenever this branch runs.
    client = OpenAI()
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = generate_llm_response(client)
            placeholder = st.empty()
            full_response = ''
            # Accumulate streamed tokens and re-render the partial answer.
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    full_response += chunk.choices[0].delta.content
                    placeholder.markdown(full_response)
            placeholder.markdown(full_response)
    message = {"role": "assistant", "content": full_response}
    st.session_state.messages.append(message)
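
# Note: on Streamlit >= 1.31, the manual accumulation loop above could be
# replaced with st.write_stream, which renders a generator of text chunks as
# they arrive and returns the concatenated string. A minimal sketch (not used
# above, shown for reference only):
#
#     full_response = st.write_stream(
#         chunk.choices[0].delta.content or "" for chunk in response
#     )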