thanhtung09t2
committed
Commit 1a418ef
1 Parent(s): 81d4a36
Create app.py
app.py
ADDED
@@ -0,0 +1,50 @@
+# basic app to demo the RAG chatbot, using Streamlit to keep the frontend/UI simple
+import sys
+import os
+import streamlit as st
+from time import time
+import logging
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+os.environ['ROOT_PATH'] = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
+from api.engine import ChatEngine
+
+logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+st.title("Smart Chatbot for Organic Crop powered by Eco Footprint")
+
+@st.cache_resource
+def initialize():
+    return ChatEngine(vector_index="chroma", force_new_db=False)
+
+engine = initialize()
+
+
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if user_input := st.chat_input("Bạn muốn hỏi điều gì?"):
+    st.session_state.messages.append({"role": "user", "content": user_input})
+    with st.chat_message("user"):
+        st.markdown(user_input)
+
+    with st.chat_message("assistant"):
+        message_placeholder = st.empty()
+
+        response_content = ""
+        with st.spinner("Thinking..."):
+            start = time()
+            streaming_response = engine.query_streaming(user_input)
+            # stream the result and update the UI as chunks arrive
+            query_end = time()
+            print(f"Query time calculated: {round(query_end-start,4)}")
+            for chunk in streaming_response.response_gen:
+                response_content += chunk
+                message_placeholder.markdown(f"{response_content} ▌")
+            end = time()
+            print(f"Response time calculated: {round(end-start,4)}")
+        message_placeholder.markdown(response_content)
+        st.session_state.messages.append({"role": "assistant", "content": response_content})
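Note: app.py imports ChatEngine from api.engine, which is not part of this commit, so the exact interface is an assumption. The script only relies on a constructor taking vector_index and force_new_db, and a query_streaming method whose return value exposes a response_gen iterator of text chunks. A minimal, hypothetical stand-in for that assumed interface (useful for exercising the UI without the real engine) could look like this:

# Hypothetical stand-in for api.engine.ChatEngine, matching only the calls app.py makes.
# Not the project's real implementation.
from dataclasses import dataclass
from typing import Iterator

@dataclass
class StreamingResponse:
    response_gen: Iterator[str]  # app.py iterates this to build the answer

class ChatEngine:
    def __init__(self, vector_index: str = "chroma", force_new_db: bool = False):
        # the real engine presumably builds or loads a Chroma vector store here
        self.vector_index = vector_index
        self.force_new_db = force_new_db

    def query_streaming(self, question: str) -> StreamingResponse:
        # the real engine would retrieve context and stream an LLM answer;
        # this stub just echoes the question word by word
        def gen() -> Iterator[str]:
            for word in f"(stub answer for: {question})".split():
                yield word + " "
        return StreamingResponse(response_gen=gen())

With Streamlit installed, the demo starts with: streamlit run app.py. The @st.cache_resource decorator keeps a single ChatEngine instance alive across Streamlit's script reruns, so the vector index is not rebuilt on every chat turn.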