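"""Streamlit chat UI for RegBotBeta, powered by LlamaIndex and the OpenAI API.

The app asks for an OpenAI API key, validates it, builds a vector index over
the `bloom` source defined in `models`, and then answers chat messages against
that index with a simulated typing effect.
"""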

import os
import time

import openai
import requests
import streamlit as st

from models import bloom
from utils.util import *  # expected to provide validate, create_index and get_response

st.title("Welcome to RegBotBeta")
st.header("Powered by `LlamaIndex🦙` and `OpenAI API`")

# Keep the conversation in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

index = None

api_key = st.text_input("Enter your OpenAI API key here:", type="password")

if api_key:
    # validate() is expected to return a requests-style response;
    # an "error" key in its JSON means the key was rejected.
    resp = validate(api_key)
    if "error" in resp.json():
        st.info("Invalid Token! Try again.")
    else:
        st.info("Success")
        os.environ["OPENAI_API_KEY"] = api_key
        openai.api_key = api_key
        with st.spinner("Initializing vector index ..."):
            index = create_index(bloom)

st.write("---")

if index:
    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("Say something"):
        # Show and record the user's message.
        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.spinner("Processing your query..."):
            bot_response = get_response(index, prompt)

        print("bot: ", bot_response)  # server-side debug log

        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # Reveal the response word by word to simulate typing.
            for chunk in bot_response.split():
                full_response += chunk + " "
                time.sleep(0.05)
                message_placeholder.markdown(full_response + "▌")

            message_placeholder.markdown(full_response)

        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )