# https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
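# Streamlit chat UI for RegBotBeta: collects an OpenAI API key, validates it,
# builds a vector index with create_index(), and answers chat prompts via
# get_response(), streaming the reply word by word with a typing effect.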
import os
import time

import openai
import requests
import streamlit as st

from models import bloom
from utils.util import *

# from streamlit_chat import message
st.title("Welcome to RegBotBeta")
st.header("Powered by `LlamaIndex🦙` and `OpenAI API`")
if "messages" not in st.session_state:
st.session_state.messages = []
index = None
api_key = st.text_input("Enter your OpenAI API key here:", type="password")
if api_key:
resp = validate(api_key)
if "error" in resp.json():
st.info("Invalid Token! Try again.")
else:
st.info("Success")
os.environ["OPENAI_API_KEY"] = api_key
openai.api_key = api_key
with st.spinner("Initializing vector index ..."):
index = create_index(bloom)
st.write("---")
if index:
    # Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("Say something"):
        # Display user message in chat message container
        st.chat_message("user").markdown(prompt)

        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.spinner("Processing your query..."):
            bot_response = get_response(index, prompt)

        print("bot: ", bot_response)

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # simulate the chatbot "thinking" before responding
            # (or stream its response)
            for chunk in bot_response.split():
                full_response += chunk + " "
                time.sleep(0.05)

                # add a blinking cursor to simulate typing
                message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)
            # st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )

        # Scroll to the bottom of the chat container
        # st.markdown(
        #     """
        #     <script>
        #         const chatContainer = document.getElementsByClassName("css-1n76uvr")[0];
        #         chatContainer.scrollTop = chatContainer.scrollHeight;
        #     </script>
        #     """,
        #     unsafe_allow_html=True,
        # )
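
# Assumed interfaces of the helpers imported from utils.util (inferred from the
# call sites above, not defined in this file):
#   validate(api_key)           -> requests.Response; its .json() contains "error" on failure
#   create_index(model)         -> queryable vector index built over the given model/data
#   get_response(index, prompt) -> str answer for the user's prompt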