# Basic app to demo the RAG chatbot; Streamlit is used to keep the frontend/UI simple.
import sys
import os
import streamlit as st
from time import time
import logging
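# Point downstream modules at the project root. This is done before importing api.engine,
# which presumably reads ROOT_PATH at import time (an assumption based on the import order).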
os.environ['ROOT_PATH'] = os.path.dirname(os.path.abspath(__file__))
from api.engine import ChatEngine
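# ChatEngine (from api/engine.py, not shown here) is assumed to expose:
#   - ChatEngine(vector_index=..., force_new_db=...): loads or rebuilds the vector store
#   - query_streaming(question): returns a streaming response whose .response_gen yields text chunks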
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
st.title("Smart Eco Footprint")
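# Build the chat engine once per session; st.cache_resource reuses the same instance across Streamlit reruns.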
@st.cache_resource
def initialize():
    return ChatEngine(vector_index="chroma", force_new_db=False)
engine = initialize()
if "messages" not in st.session_state:
    st.session_state.messages = []
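# Replay the stored conversation so the chat history persists across Streamlit reruns.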
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
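# Handle a new user turn: echo it, stream the assistant's reply, then persist both messages.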
if user_input := st.chat_input("Bạn muốn hỏi điều gì?"):  # prompt: "What would you like to ask?"
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        response_content = ""
        with st.spinner("Thinking..."):
            start = time()
            streaming_response = engine.query_streaming(user_input)
            # Stream the result and update the UI as chunks arrive
            query_end = time()
            logging.info(f"Query time: {round(query_end - start, 4)}s")
            for chunk in streaming_response.response_gen:
                response_content += chunk
                message_placeholder.markdown(response_content)
            end = time()
            logging.info(f"Full response time: {round(end - start, 4)}s")
        message_placeholder.markdown(response_content)
    st.session_state.messages.append({"role": "assistant", "content": response_content})
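# The app is typically launched with:  streamlit run <path-to-this-file>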