A-New-Day-001 committed on
Commit 5426d51
1 Parent(s): c5fb375

Upload 24 files

analytics_reports/__pycache__/extract_location.cpython-310.pyc ADDED
Binary file (709 Bytes).
 
analytics_reports/__pycache__/reports.cpython-310.pyc ADDED
Binary file (4.22 kB).
 
analytics_reports/__pycache__/reports.cpython-311.pyc ADDED
Binary file (9.91 kB).
 
analytics_reports/__pycache__/reports.cpython-39.pyc ADDED
Binary file (4.57 kB).
 
analytics_reports/reports.py ADDED
@@ -0,0 +1,183 @@
+ import streamlit as st
+ import pandas as pd
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+ from matplotlib.ticker import ScalarFormatter # Import ScalarFormatter
+ import plotly.express as px
+ import numpy as np
+
+ st.set_option('deprecation.showPyplotGlobalUse', False)
+
+ # Extract location
+ input_path = 'data_3/data_test.csv'
+ output_path = 'data_3/data_test_city.csv'
+ # Load the addresses file into a DataFrame
+ addresses_df = pd.read_csv(input_path, encoding='UTF-8-SIG')
+ # print(addresses_df.head())
+
+ # Load the cities/districts file into a DataFrame
+ cities_districts_df = pd.read_csv('data_3/Cities.csv', encoding='UTF-8-SIG')
+
+ # Function to find city and district for each address
+ def find_city_district(location):
+     location = str(location)  # Ensure location is a string
+     for index, row in cities_districts_df.iterrows():
+         if str(row["City"]) in location and str(row["District"]) in location:
+             return row["City"], row["District"]
+     return None, None
+
+ # Apply the function to the addresses DataFrame
+ addresses_df[["City", "District"]] = addresses_df["Location"].apply(find_city_district).apply(pd.Series)
+
+ # Save the new DataFrame to a CSV file
+ addresses_df.to_csv(output_path, index=False)
+
+ data = pd.read_csv('data_3/data_test_city.csv')
+ print(data.info())
+
+ df = data.dropna(subset=['Price'])
+ df = df.dropna(subset=['City'])
+ df = df[df['Price'] != 'Thỏa thuận']
+ df['Price'] = pd.to_numeric(df['Price'].str.replace(',', ''), errors='coerce')
+ df['Price'] = df['Price'].astype(float)
+
+ print(df.info())
+
+ def plot_minmax_prices(selected_category):
+     # Filter the data based on the selected category
+     filtered_data = df[df['Category'] == selected_category]
+
+     # Create a pivot table
+     pivot_table = filtered_data.pivot_table(index=['City', 'Category'], values='Price', aggfunc=['min', 'max']).reset_index()
+     print(pivot_table.head())
+     pivot_table.columns = ['City', 'Category', 'Min Price', 'Max Price']
+     # Display the data table for the filtered data
+     st.subheader('Tổng hợp Giá bất động sản cao nhất và thấp nhất ở các tỉnh thành')
+     st.dataframe(pivot_table)
+
+ def plot_by_category(selected_category):
+     # Get the unique city names and sort them alphabetically
+     unique_cities = sorted(df['City'].unique())
+     selected_city = st.sidebar.selectbox('Chọn thành phố hoặc tỉnh', unique_cities)
+     # Filter the data for the selected city
+     filtered_data = df[(df['City'] == selected_city) & (df['Category'] == selected_category)]
+     # Display the data table for the filtered data
+     # st.write('### Data Table')
+     # st.write(filtered_data)
+
+     # Check if data is empty
+     if filtered_data.empty:
+         print("filtered_data is empty")
+         st.warning(f"No data available for {selected_category} in {selected_city}.")
+     else:
+         # Plot Number of property by District
+         st.subheader(f'Số lượng bất động sản {selected_category} ở {selected_city}')
+         fig = plt.figure(figsize=(6, 3))
+         sns.countplot(data=filtered_data, y='District')
+         plt.xticks(rotation=25)  # Rotate x-axis labels for better readability
+         plt.xlabel('Số lượng')
+         plt.ylabel('Quận/Huyện')
+         st.pyplot(fig)
+
+         # Plot Price per Area
+         st.subheader(f'Giá bất động sản {selected_category} theo M² ở {selected_city}')
+         # Create a new column for Price per Area
+         filtered_data['Price per Area'] = filtered_data['Price'] / filtered_data['Area']
+         # Plot the data
+         fig = plt.figure(figsize=(6, 3))
+         sns.barplot(data=filtered_data, y='District', x='Price per Area')
+         plt.xticks(rotation=45)
+         plt.xlabel('Giá trung bình')
+         plt.ylabel('Quận/Huyện')
+         # Show the full number of price instead of scientific notation
+         plt.ticklabel_format(style='plain', axis='x')
+         st.pyplot(fig)
+
+         # Plot the estate type by City
+         # Create a pie chart showing the proportion of estate types by city
+         st.subheader(f'Loại bất động sản ở {selected_city}')
+         estate_type_counts = filtered_data['Estate type'].value_counts()
+         fig = px.pie(
+             values=estate_type_counts.values,
+             names=estate_type_counts.index,
+         )
+         # Display the chart
+         st.plotly_chart(fig)
+
+         # Plot the certification status by City
+         # Replace empty values (including spaces) with NaN in the 'Certification Status' column
+         filtered_data['Certification status'] = filtered_data['Certification status'].replace(' ', pd.NA)
+         # Replace blank (empty) values with "Không xác định" in the 'Certification Status' column
+         filtered_data['Certification status'].fillna("Không xác định", inplace=True)
+         certification_count = len(filtered_data[filtered_data['Certification status'].notna()])
+         if certification_count == 0:
+             st.write('')
+         else:
+             # Create a pie chart showing the proportion of certification status by city
+             st.subheader(f'Tình trạng pháp lý của bất động sản ở {selected_city}')
+             certification_counts = filtered_data['Certification status'].value_counts()
+             fig = px.pie(
+                 values=certification_counts.values,
+                 names=certification_counts.index,
+             )
+             # Display the chart
+             st.plotly_chart(fig)
+
+         # Plot the directions per city and Category
+         direction_count = len(filtered_data[filtered_data['Direction'].notna()])
+         if direction_count == 0:
+             st.write('')
+         else:
+             # Create a pie chart showing the proportion of estate types by city
+             st.subheader(f'Hướng bất động sản {selected_category} ở {selected_city}')
+             # Create a horizontal bar chart
+             fig = plt.figure(figsize=(6, 3))
+             sns.set(style='whitegrid')
+             sns.countplot(data=filtered_data, x="Direction", palette="Spectral")
+             plt.xlabel('Hướng')
+             plt.ylabel('Số lượng')
+             # plt.title(f'Directions of property in {selected_city}')
+             plt.show()
+             # Display the chart
+             st.pyplot(fig)
+
+         # Create a pie chart showing the share of properties with and without parking
+         st.subheader(f'Tỷ lệ bất động sản có chỗ đậu xe ở {selected_city}')
+         # Create a pie chart to show the proportion of parking slot and non-parking slot
+         # parking_slot_count = filtered_data[filtered_data['Parking slot'].notna()]['Parking slot'].count()
+         parking_slot_count = len(filtered_data[filtered_data['Parking slot'].notna()])
+         # non_parking_slot_count = filtered_data[filtered_data['Parking slot'].isna()]['Parking slot'].count()
+         non_parking_slot_count = len(filtered_data[filtered_data['Parking slot'].isna()])
+         fig_pie = px.pie(
+             names=['Có chỗ đậu xe', 'Không có chỗ đậu xe'],
+             values=[parking_slot_count, non_parking_slot_count]
+         )
+         # Display the pie chart
+         st.plotly_chart(fig_pie)
+         if parking_slot_count == 0:
+             st.write('')
+         else:
+             st.subheader(f'Số lượng chỗ đậu xe ở {selected_city}')
+             filtered_data2 = filtered_data[filtered_data['Parking slot'].notna() & (filtered_data['Parking slot'] != ' ')]
+             # Create a horizontal bar chart
+             fig = plt.figure(figsize=(6, 3))
+             sns.set(style="whitegrid")
+             sns.countplot(data=filtered_data2, x="Parking slot", palette="Spectral")
+             plt.xlabel('Số lượng chỗ đậu xe/bất động sản')
+             plt.ylabel('Số lượng')
+             # Display the chart
+             st.pyplot(fig)
+
+         # Create a pie chart showing the proportion of seller types in the city
+         st.subheader(f'Tỷ lệ người bán ở {selected_city}')
+         # Compare individual sellers with real estate companies/brokers
+         personal_count = filtered_data[filtered_data['Seller type'] == 'Cá Nhân - Chính Chủ']['Seller type'].count()
+         non_personal_count = filtered_data[filtered_data['Seller type'] == 'Công Ty Nhà Đất - Môi Giới BĐS']['Seller type'].count()
+         fig_pie = px.pie(
+             names=['Cá Nhân - Chính Chủ', 'Công Ty Nhà Đất - Môi Giới BĐS'],
+             values=[personal_count, non_personal_count],
+         )
+         # Display the pie chart
+         st.plotly_chart(fig_pie)
+
+
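
Note on the file above: reports.py builds data_3/data_test_city.csv and loads it at module import time, so importing plot_minmax_prices or plot_by_category from screens/analysis.py re-runs that preprocessing on every app start. A minimal sketch of one way to keep the one-off step out of the import path; the build_city_file name and the main guard are assumptions for illustration, not part of this upload:

    def build_city_file(input_path='data_3/data_test.csv',
                        output_path='data_3/data_test_city.csv'):
        # Tag each address with its city/district and write the enriched CSV once.
        addresses_df = pd.read_csv(input_path, encoding='UTF-8-SIG')
        addresses_df[["City", "District"]] = (
            addresses_df["Location"].apply(find_city_district).apply(pd.Series)
        )
        addresses_df.to_csv(output_path, index=False)

    if __name__ == "__main__":
        build_city_file()
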
screens/__pycache__/analysis.cpython-311.pyc ADDED
Binary file (1.11 kB).
 
screens/__pycache__/analysis.cpython-39.pyc ADDED
Binary file (677 Bytes).
 
screens/__pycache__/chat_bot.cpython-311.pyc ADDED
Binary file (10.7 kB).
 
screens/__pycache__/chat_bot.cpython-39.pyc ADDED
Binary file (6.32 kB).
 
screens/__pycache__/chat_bot_2.cpython-311.pyc ADDED
Binary file (10.4 kB).
 
screens/__pycache__/chat_bot_2.cpython-39.pyc ADDED
Binary file (6.1 kB).
 
screens/__pycache__/index.cpython-311.pyc ADDED
Binary file (1.15 kB).
 
screens/__pycache__/index.cpython-39.pyc ADDED
Binary file (870 Bytes).
 
screens/__pycache__/predict.cpython-311.pyc ADDED
Binary file (6.74 kB).
 
screens/__pycache__/predict.cpython-39.pyc ADDED
Binary file (3.23 kB).
 
screens/__pycache__/price_prediction.cpython-311.pyc ADDED
Binary file (6.49 kB).
 
screens/__pycache__/search.cpython-311.pyc ADDED
Binary file (20.2 kB).
 
screens/__pycache__/search.cpython-39.pyc ADDED
Binary file (8.79 kB).
 
screens/analysis.py ADDED
@@ -0,0 +1,16 @@
+ import streamlit as st
+ import pandas as pd
+ from analytics_reports.reports import plot_minmax_prices
+ from analytics_reports.reports import plot_by_category
+
+ def report_analysis():
+     # Title of the Analysis page
+     st.title('Analysis')
+
+     # Load your real estate data into a DataFrame
+     data = pd.read_csv('data_3/data_test_city.csv')
+
+     st.header('Analytics Reports')
+     st.sidebar.header('Select Category')
+     selected_category = st.sidebar.selectbox('Choose a Category', data['Category'].unique())
+
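
As committed, report_analysis() builds the category selector but never calls the two plotting helpers it imports from analytics_reports.reports. A minimal sketch of how the function could invoke them (the call order is an assumption):

    def report_analysis():
        st.title('Analysis')
        data = pd.read_csv('data_3/data_test_city.csv')
        st.header('Analytics Reports')
        st.sidebar.header('Select Category')
        selected_category = st.sidebar.selectbox('Choose a Category', data['Category'].unique())
        # Render the reports for the chosen category (assumed wiring)
        plot_minmax_prices(selected_category)   # min/max price table per city
        plot_by_category(selected_category)     # per-city charts for the chosen category
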
screens/chat_bot.py ADDED
@@ -0,0 +1,187 @@
+ import streamlit as st
+ # Import library
+ import yaml
+ # Load config.yml and parse it into variables
+ with open("config.yml", "r") as ymlfile:
+     cfg = yaml.safe_load(ymlfile)
+ _BARD_API_KEY = cfg["API_KEY"]["Bard"]
+ main_path = cfg["LOCAL_PATH"]["main_path"]
+ chat_context_length = cfg["CHAT"]["chat_context_length"]
+ model_name = cfg["EMBEDDINGS"]["HuggingFaceEmbeddings"]["model_name"]
+ model_kwargs = cfg["EMBEDDINGS"]["HuggingFaceEmbeddings"]["model_kwargs"]
+ chunk_size = cfg["CHUNK"]["chunk_size"]
+ chunk_overlap = cfg["CHUNK"]["chunk_overlap"]
+
+ from langchain.vectorstores import Chroma
+ import streamlit as st
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
+ # Bard
+ from bardapi import Bard
+ from typing import Any, List, Mapping, Optional
+ from langchain.llms.base import LLM
+ from langchain.callbacks.manager import CallbackManagerForLLMRun
+
+ from streamlit_feedback import streamlit_feedback
+
+
+ # Define the Bard LLM wrapper
+ class BardLLM(LLM):
+
+     @property
+     def _llm_type(self) -> str:
+         return "custom"
+
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+     ) -> str:
+         response = Bard(token=_BARD_API_KEY).get_answer(prompt)['content']
+         return response
+
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         """Get the identifying parameters."""
+         return {}
+
+
+
+ def load_embeddings():
+     embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
+     chroma_index = Chroma(persist_directory=main_path+"/vectorstore/chroma_db", embedding_function=embeddings)
+     print("Successfully loaded embeddings and index")
+     return chroma_index
+
+
+
+ def ask_with_memory(vector_store, question, chat_history_1=[], document_description=""):
+
+     llm = BardLLM()
+     retriever = vector_store.as_retriever(  # now the vector store can return documents
+         search_type='similarity', search_kwargs={'k': 3})
+
+     general_system_template = f"""
+     You are a professional consultant at a real estate consulting company, providing consulting services \
+     to customers on real estate development strategies, real estate news and real estate law.\
+     Your role is to communicate with customers and discuss their concerns about real estate.\
+     Once the customer has provided their question,\
+     you obtain some documents about real estate laws or real estate news related to their question.\
+     Then you will examine these documents.\
+     You must provide the answer based on these documents, which means\
+     using only the heading and piece of context to answer the questions at the end.\
+     If you don't know the answer just say that you don't know, don't try to make up an answer.\
+     If the question is not in the field of real estate, just answer that you do not know.\
+     You respond in a short, very conversational friendly style.\
+     Answer only in Vietnamese.\
+     ----
+     HEADING: ({document_description})
+     CONTEXT: {{context}}
+     ----
+     """
+     general_user_template = """Here is the next question, remember to only answer if you can from the provided context.
+     If the question is not relevant to real estate, just answer that you do not know, do not create your own answer.
+     Only respond in Vietnamese.
+     QUESTION:```{question}```"""
+
+     messages_1 = [
+         SystemMessagePromptTemplate.from_template(general_system_template),
+         HumanMessagePromptTemplate.from_template(general_user_template)
+     ]
+     qa_prompt = ChatPromptTemplate.from_messages(messages_1)
+
+
+     crc = ConversationalRetrievalChain.from_llm(llm, retriever, combine_docs_chain_kwargs={'prompt': qa_prompt})
+     result = crc({'question': question, 'chat_history': chat_history_1})
+     return result
+
+
+ def clear_history():
+     if "history_1" in st.session_state:
+         st.session_state.history_1 = []
+         st.session_state.messages_1 = []
+
+ # Define a function for submitting feedback
+ def _submit_feedback(user_response, emoji=None):
+     st.toast(f"Feedback submitted: {user_response}", icon=emoji)
+     return user_response.update({"some metadata": 123})
+
+
+ def format_chat_history(chat_history_1):
+     formatted_history = ""
+     for entry in chat_history_1:
+         question, answer = entry
+         # Added an extra '\n' for the blank line
+         formatted_history += f"Question: {question}\nAnswer: {answer}\n\n"
+     return formatted_history
+
+ def run_chatbot():
+     with st.sidebar.title("Sidebar"):
+         if st.button("Clear History"):
+             clear_history()
+
+     st.title("🦾 Law/News chatbot")
+
+     # Initialize the chatbot and load embeddings
+     if "messages_1" not in st.session_state:
+         with st.spinner("Initializing, please wait a moment!!!"):
+             st.session_state.vector_store = load_embeddings()
+             st.success("Finish!!!")
+         st.session_state["messages_1"] = [{"role": "assistant", "content": "Tôi có thể giúp gì được cho bạn?"}]
+
+     messages_1 = st.session_state.messages_1
+     feedback_kwargs = {
+         "feedback_type": "thumbs",
+         "optional_text_label": "Please provide extra information",
+         "on_submit": _submit_feedback,
+     }
+
+     for n, msg in enumerate(messages_1):
+         st.chat_message(msg["role"]).write(msg["content"])
+
+         if msg["role"] == "assistant" and n > 1:
+             feedback_key = f"feedback_{int(n/2)}"
+
+             if feedback_key not in st.session_state:
+                 st.session_state[feedback_key] = None
+
+             streamlit_feedback(
+                 **feedback_kwargs,
+                 key=feedback_key,
+             )
+
+
+     chat_history_placeholder = st.empty()
+     if "history_1" not in st.session_state:
+         st.session_state.history_1 = []
+
+     if prompt := st.chat_input():
+         if "vector_store" in st.session_state:
+             vector_store = st.session_state["vector_store"]
+
+         q = prompt
+
+         st.session_state.messages_1.append({"role": "user", "content": prompt})
+         st.chat_message("user").write(prompt)
+         with st.spinner("Thinking..."):
+             response = ask_with_memory(vector_store, q, st.session_state.history_1)
+
+         if len(st.session_state.history_1) >= chat_context_length:
+             st.session_state.history_1 = st.session_state.history_1[1:]
+
+         st.session_state.history_1.append((q, response['answer']))
+
+         chat_history_str = format_chat_history(st.session_state.history_1)
+
+         msg = {"role": "assistant", "content": response['answer']}
+         st.session_state.messages_1.append(msg)
+         st.chat_message("assistant").write(msg["content"])
+
+         # Display the feedback component after the chatbot responds
+         feedback_key = f"feedback_{len(st.session_state.messages_1) - 1}"
+         streamlit_feedback(
+             **feedback_kwargs,
+             key=feedback_key,
+         )
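
Both chatbot modules read config.yml at import time; the config file itself is not part of this upload. A minimal sketch of the structure implied by the cfg[...] lookups above; every value is a placeholder, and the embedding model name is borrowed from screens/search.py as an assumption:

    import yaml

    cfg = {
        "API_KEY": {"Bard": "<your Bard token>"},            # placeholder
        "LOCAL_PATH": {"main_path": "."},                     # placeholder
        "CHAT": {"chat_context_length": 3},                   # placeholder
        "EMBEDDINGS": {
            "HuggingFaceEmbeddings": {
                "model_name": "keepitreal/vietnamese-sbert",  # assumed from search.py
                "model_kwargs": {"device": "cpu"},
            }
        },
        "CHUNK": {"chunk_size": 1000, "chunk_overlap": 100},  # placeholders
    }

    # Write the placeholder config next to the app so the chatbots can load it.
    with open("config.yml", "w") as f:
        yaml.safe_dump(cfg, f)
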
screens/chat_bot_2.py ADDED
@@ -0,0 +1,187 @@
+ import streamlit as st
+ # Import library
+ import yaml
+ # Load config.yml and parse it into variables
+ with open("config.yml", "r") as ymlfile:
+     cfg = yaml.safe_load(ymlfile)
+ _BARD_API_KEY = cfg["API_KEY"]["Bard"]
+ main_path = cfg["LOCAL_PATH"]["main_path"]
+ chat_context_length = cfg["CHAT"]["chat_context_length"]
+ model_name = cfg["EMBEDDINGS"]["HuggingFaceEmbeddings"]["model_name"]
+ model_kwargs = cfg["EMBEDDINGS"]["HuggingFaceEmbeddings"]["model_kwargs"]
+ chunk_size = cfg["CHUNK"]["chunk_size"]
+ chunk_overlap = cfg["CHUNK"]["chunk_overlap"]
+
+ import os
+ from dotenv import load_dotenv, find_dotenv
+ from langchain.vectorstores import Chroma
+ import streamlit.components.v1 as components
+ import streamlit as st
+ import sys
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
+ # Bard
+ from bardapi import Bard
+ from typing import Any, List, Mapping, Optional
+ from getpass import getpass
+ import os
+ from langchain.llms.base import LLM
+ from langchain.callbacks.manager import CallbackManagerForLLMRun
+
+ from streamlit_feedback import streamlit_feedback
+
+
+ # Define the Bard LLM wrapper
+ class BardLLM(LLM):
+
+     @property
+     def _llm_type(self) -> str:
+         return "custom"
+
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+     ) -> str:
+         response = Bard(token=_BARD_API_KEY).get_answer(prompt)['content']
+         return response
+
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         """Get the identifying parameters."""
+         return {}
+
+
+
+ def load_embeddings():
+     embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)
+     chroma_index = Chroma(persist_directory="./chroma_index_1", embedding_function=embeddings)
+     print("Successfully loaded embeddings and index")
+     return chroma_index
+
+
+ def ask_with_memory(vector_store, question, chat_history=[], document_description=""):
+
+     llm = BardLLM()
+     retriever = vector_store.as_retriever(  # now the vector store can return documents
+         search_type='similarity', search_kwargs={'k': 3})
+
+     general_system_template = f"""
+     Use the following pieces of context to answer the question at the end.
+     If you don't know the answer, just say that you don't know, don't try to
+     make up an answer.
+     Imagine you're talking to a friend and use natural language and phrasing.
+     You can only use Vietnamese; do not use other languages.
+     Suggest using our search function for more information.
+     ----
+     CONTEXT: {{context}}
+     ----
+     """
+     general_user_template = """Here is the next question, remember to only answer if you can from the provided context.
+     If the question is not relevant to real estate, just answer that you do not know, do not create your own answer.
+     Do not recommend or propose any information of the properties.
+     Be sure to respond in a complete sentence, being comprehensive, including all information in the provided context.
+     Imagine you're talking to a friend and use natural language and phrasing.
+     Only respond in Vietnamese.
+     QUESTION:```{question}```"""
+
+     messages = [
+         SystemMessagePromptTemplate.from_template(general_system_template),
+         HumanMessagePromptTemplate.from_template(general_user_template)
+     ]
+     qa_prompt = ChatPromptTemplate.from_messages(messages)
+
+
+     crc = ConversationalRetrievalChain.from_llm(llm, retriever, combine_docs_chain_kwargs={'prompt': qa_prompt})
+     result = crc({'question': question, 'chat_history': chat_history})
+     return result
+
+
+ def clear_history():
+     if "history" in st.session_state:
+         st.session_state.history = []
+         st.session_state.messages = []
+
+ # Define a function for submitting feedback
+ def _submit_feedback(user_response, emoji=None):
+     st.toast(f"Feedback submitted: {user_response}", icon=emoji)
+     return user_response.update({"some metadata": 123})
+
+
+ def format_chat_history(chat_history):
+     formatted_history = ""
+     for entry in chat_history:
+         question, answer = entry
+         # Added an extra '\n' for the blank line
+         formatted_history += f"Question: {question}\nAnswer: {answer}\n\n"
+     return formatted_history
+
+ def run_chatbot_2():
+     with st.sidebar.title("Sidebar"):
+         if st.button("Clear History"):
+             clear_history()
+
+     st.title("🤖 Real Estate chatbot")
+
+     # Initialize the chatbot and load embeddings
+     if "messages" not in st.session_state:
+         with st.spinner("Initializing, please wait a moment!!!"):
+             st.session_state.vector_store = load_embeddings()
+             st.success("Finish!!!")
+         st.session_state["messages"] = [{"role": "assistant", "content": "Tôi có thể giúp gì được cho bạn?"}]
+
+     messages = st.session_state.messages
+     feedback_kwargs = {
+         "feedback_type": "thumbs",
+         "optional_text_label": "Please provide extra information",
+         "on_submit": _submit_feedback,
+     }
+
+     for n, msg in enumerate(messages):
+         st.chat_message(msg["role"]).write(msg["content"])
+
+         if msg["role"] == "assistant" and n > 1:
+             feedback_key = f"feedback_{int(n/2)}"
+
+             if feedback_key not in st.session_state:
+                 st.session_state[feedback_key] = None
+
+             streamlit_feedback(
+                 **feedback_kwargs,
+                 key=feedback_key,
+             )
+
+     chat_history_placeholder = st.empty()
+     if "history" not in st.session_state:
+         st.session_state.history = []
+
+     if prompt := st.chat_input():
+         if "vector_store" in st.session_state:
+             vector_store = st.session_state["vector_store"]
+
+         q = prompt
+
+         st.session_state.messages.append({"role": "user", "content": prompt})
+         st.chat_message("user").write(prompt)
+
+         response = ask_with_memory(vector_store, q, st.session_state.history)
+
+         if len(st.session_state.history) >= chat_context_length:
+             st.session_state.history = st.session_state.history[1:]
+
+         st.session_state.history.append((q, response['answer']))
+
+         chat_history_str = format_chat_history(st.session_state.history)
+
+         msg = {"role": "assistant", "content": response['answer']}
+         st.session_state.messages.append(msg)
+         st.chat_message("assistant").write(msg["content"])
+
+         # Display the feedback component after the chatbot responds
+         feedback_key = f"feedback_{len(st.session_state.messages) - 1}"
+         streamlit_feedback(
+             **feedback_kwargs,
+             key=feedback_key,
+         )
screens/index.py ADDED
@@ -0,0 +1,37 @@
+ from screens.search import Search_Property
+ from screens.chat_bot import run_chatbot
+ from screens.chat_bot_2 import run_chatbot_2
+ from screens.analysis import report_analysis
+ from screens.predict import predict_price
+ from utils.index import get_hash
+
+ def get_routes():
+     screens = [
+         {
+             "component": predict_price,
+             "name": "Price Prediction",
+             "icon": "piggy-bank"
+         },
+         {
+             "component": report_analysis,
+             "name": "Report Analysis",
+             "icon": "bi-bar-chart-line"
+         },
+         {
+             "component": Search_Property,
+             "name": "Search",
+             "icon": "search"
+         },
+         {
+             "component": run_chatbot,
+             "name": "Law/News chatbot",
+             "icon": "chat"
+         },
+         {
+             "component": run_chatbot_2,
+             "name": "Real Estate chatbot",
+             "icon": "chat-dots"
+         }
+     ]
+
+     return get_hash(screens)
screens/predict.py ADDED
@@ -0,0 +1,125 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ from autogluon.multimodal import MultiModalPredictor
+ from autogluon.tabular import TabularPredictor
+
+ # Define icons
+ seller_icon = "🏡"
+ buyer_icon = "🔍"
+ submit_icon = "📝"
+ predict_icon = "🔮"
+
+ # Initialize df as a global variable
+ df = None
+
+ def predict_price():
+     global df  # Declare df as a global variable
+     # Set the title and subheader
+     st.title("Real Estate Price Prediction")
+     st.subheader("Choose your role and provide property details")
+
+     # User role selection
+     option = st.selectbox("Who are you?", ['Seller', 'Buyer'], index=0)
+
+     if option == "Seller":
+         st.subheader(f"{seller_icon} Seller Information")
+         with st.spinner("Loading model..."):
+             predictor = MultiModalPredictor.load("C:/Users/duong/OneDrive/Desktop/mm-nlp-image-transformer")
+         st.success("Done")
+         description = st.text_area("Property Description", help="Describe your property")
+         title = st.text_input("Property Title", help="Enter a title for your property")
+     else:
+         st.subheader(f"{buyer_icon} Buyer Information")
+         with st.spinner("Loading model..."):
+             predictor = TabularPredictor.load("C:/Users/duong/OneDrive/Desktop/tabular", require_py_version_match=False)
+         st.success("Done")
+
+     # Property details input
+     area = st.number_input("Property Area (square meters)", min_value=1)
+     location = st.text_input("Property Location", help="Enter the location of the property")
+     city_code = st.text_input("City Code", help="Enter the city code")
+     district = st.text_input("District", help="Enter the district name")
+     bedroom = st.slider("Number of Bedrooms", min_value=1, max_value=10, value=5, step=1)
+     bathroom = st.slider("Number of Bathrooms", min_value=1, max_value=10, value=2, step=1)
+
+     # Submit button to create the DataFrame
+     submitted = st.button(f"{submit_icon} Submit")
+
+     # Create a DataFrame from user inputs
+     if submitted:
+         if area and location and city_code and district and bedroom and bathroom:
+             if option == "Seller":
+                 if (not description or not title):
+                     st.error("Please fill in both Description and Title fields for Sellers.")
+                 else:
+                     data = {
+                         "Price": np.nan,
+                         "Area": [area],
+                         "Location": [location],
+                         "Time stamp": np.nan,
+                         "Certification status": np.nan,
+                         "Direction": np.nan,
+                         "Bedrooms": [bedroom],
+                         "Bathrooms": [bathroom],
+                         "Front width": np.nan,
+                         "Floor": np.nan,
+                         "Image URL": np.nan,
+                         "Road width": np.nan,
+                         "City_code": [city_code],
+                         "DistrictId": [district],
+                         "Balcony_Direction": np.nan,
+                         "Longitude": np.nan,
+                         "Lattitude": np.nan,
+                         "Description": [description],
+                         "Title": [title]
+                     }
+                     df = pd.DataFrame(data)
+                     st.write(f"{seller_icon} Input Data:")
+                     st.dataframe(df)
+             elif option == "Buyer":
+                 data = {
+                     "Price": np.nan,
+                     "Area": [area],
+                     "Location": [location],
+                     "Time stamp": np.nan,
+                     "Certification status": np.nan,
+                     "Direction": np.nan,
+                     "Bedrooms": [bedroom],
+                     "Bathrooms": [bathroom],
+                     "Front width": np.nan,
+                     "Floor": np.nan,
+                     "Image URL": np.nan,
+                     "Road width": np.nan,
+                     "City_code": [city_code],
+                     "DistrictId": [district],
+                     "Balcony_Direction": np.nan,
+                     "Longitude": np.nan,
+                     "Lattitude": np.nan
+                 }
+                 df = pd.DataFrame(data)
+                 st.write(f"{buyer_icon} Input Data:")
+                 st.dataframe(df)
+         else:
+             st.error("Please fill in all fields to have a better prediction!")
+
+     # Prediction button (only meaningful once data has been submitted, hence the df check)
+     if st.button(f"{predict_icon} Predict") and df is not None:
+         with st.spinner("Loading..."):
+             # Perform predictions and calculations here
+             predictions = predictor.predict(df.drop(columns="Price"))
+             st.success(f"Predicted Price: {predictions[0]:,.0f} VND")
+
+             scores = predictor.evaluate(
+                 df,
+                 metrics=[
+                     "mean_squared_error",
+                     "r2",
+                 ],
+             )
+
+             st.subheader("Model Evaluation Metrics:")
+             for metric, score in scores.items():
+                 st.write(f"{metric}: {score:.2f}")
+
+
screens/search.py ADDED
@@ -0,0 +1,248 @@
+ import streamlit as st
+ import os
+ import streamlit.components.v1 as components
+ from io import BytesIO
+ import requests
+ import ast
+
+ from langchain import PromptTemplate
+ from langchain.chains import RetrievalQA
+ from langchain.vectorstores import Chroma
+ from langchain.embeddings import SentenceTransformerEmbeddings
+ from bardapi import Bard
+ from typing import Any, List, Mapping, Optional
+
+ import yaml
+ with open("config.yml", "r") as ymlfile:
+     cfg = yaml.safe_load(ymlfile)
+ os.environ['_BARD_API_KEY'] = cfg["API_KEY"]["Bard"]
+
+ from langchain.llms.base import LLM
+ from langchain.callbacks.manager import CallbackManagerForLLMRun
+ class BardLLM(LLM):
+
+
+     @property
+     def _llm_type(self) -> str:
+         return "custom"
+
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+     ) -> str:
+         response = Bard(token=os.environ['_BARD_API_KEY']).get_answer(prompt)['content']
+         return response
+
+     @property
+     def _identifying_params(self) -> Mapping[str, Any]:
+         """Get the identifying parameters."""
+         return {}
+
+ @st.cache_data
+ def get_image(url):
+     r = requests.get(url)
+     return BytesIO(r.content)
+
+
+ # Define global variables
+ embeddings = None
+ index = None
+ QUESTION_PROMPT = None
+ qa = None
+ result = []
+
+ # Custom session state class for managing pagination
+ class SessionState:
+     def __init__(self):
+         self.page_index = 0  # Initialize page index
+         self.database_loaded = False  # Initialize database loaded state
+
+ # Create a session state object
+ session_state = SessionState()
+
+ # Define the search function outside of Search_Property
+ def display_search_results(result, start_idx, end_idx):
+     if result:
+         st.subheader("Search Results:")
+         for idx in range(start_idx, end_idx):
+             if idx >= len(result):
+                 break
+             property_info = result[idx]
+             st.markdown(f"**Result {idx + 1}**")
+
+             # Display property information
+             if 'Image URL' in property_info.metadata and property_info.metadata['Image URL'] is not None and not isinstance(property_info.metadata['Image URL'], float):
+                 image_path_urls = property_info.metadata['Image URL']
+                 if image_path_urls is not None and not isinstance(image_path_urls, float):
+                     # Convert the string to a Python list
+                     imageUrls = ast.literal_eval(image_path_urls)
+
+                     # Now, imageUrls is a list of strings
+                     st.image(imageUrls[0], width=700)
+
+             st.markdown(f"🏡 {property_info.metadata['Title']}")
+             if 'Location' in property_info.metadata and property_info.metadata['Location'] is not None and not isinstance(property_info.metadata['Location'], float):
+                 st.write(f"📍 Address: {property_info.metadata['Location']}")
+             if 'Area' in property_info.metadata and property_info.metadata['Area'] is not None and not isinstance(property_info.metadata['Area'], float):
+                 st.markdown(f"📏 Size: {property_info.metadata['Area']}")
+             if 'Price' in property_info.metadata and property_info.metadata['Price'] is not None and not isinstance(property_info.metadata['Price'], float):
+                 st.markdown(f"💰 Price: {property_info.metadata['Price']} ")
+             st.markdown(f"📅 Published Date: {property_info.metadata['Time stamp']}")
+             col3, col4 = st.columns([2, 1])
+             with col3:
+                 with st.expander("Full Property Information"):
+                     st.write(f"🏡 Property Title: {property_info.metadata['Title']}")
+                     if 'Area' in property_info.metadata and property_info.metadata['Area'] is not None and not isinstance(property_info.metadata['Area'], float):
+                         st.write(f"📏 Size: {property_info.metadata['Area']}")
+                     if 'Category' in property_info.metadata and property_info.metadata['Category'] is not None and not isinstance(property_info.metadata['Category'], float):
+                         st.write(f"🏢 Category: {property_info.metadata['Category']}")
+                     if 'Description' in property_info.metadata and property_info.metadata['Description'] is not None and not isinstance(property_info.metadata['Description'], float):
+                         st.write(f"📝 Description: {property_info.metadata['Description']}")
+                     if 'Price' in property_info.metadata and property_info.metadata['Price'] is not None and not isinstance(property_info.metadata['Price'], float):
+                         st.write(f"💰 Price: {property_info.metadata['Price']}")
+                     st.write(f"📅 Date: {property_info.metadata['Time stamp']}")
+                     if 'Location' in property_info.metadata and property_info.metadata['Location'] is not None and not isinstance(property_info.metadata['Location'], float):
+                         st.write(f"📍 Address: {property_info.metadata['Location']}")
+                     st.write(f"🆔 ID: {property_info.metadata['ID']}")
+                     if 'Estate type' in property_info.metadata and property_info.metadata['Estate type'] is not None and not isinstance(property_info.metadata['Estate type'], float):
+                         st.write(f"🏠 Housing Type: {property_info.metadata['Estate type']}")
+                     if 'Email' in property_info.metadata and property_info.metadata['Email'] is not None and not isinstance(property_info.metadata['Email'], float):
+                         st.write(f"✉️ Email: {property_info.metadata['Email']}")
+                     if 'Mobile Phone' in property_info.metadata and property_info.metadata['Mobile Phone'] is not None and not isinstance(property_info.metadata['Mobile Phone'], float):
+                         st.write(f"📞 Phone: {property_info.metadata['Mobile Phone']}")
+                     if 'Certification status' in property_info.metadata and property_info.metadata['Certification status'] is not None and not isinstance(property_info.metadata['Certification status'], float):
+                         st.write(f"🏆 Certification status: {property_info.metadata['Certification status']}")
+                     if 'Direction' in property_info.metadata and property_info.metadata['Direction'] is not None and not isinstance(property_info.metadata['Direction'], float):
+                         st.write(f"🧭 Direction: {property_info.metadata['Direction']}")
+                     if 'Rooms' in property_info.metadata and property_info.metadata['Rooms'] is not None and not isinstance(property_info.metadata['Rooms'], float):
+                         st.write(f"🚪 Rooms: {property_info.metadata['Rooms']}")
+                     if 'Bedrooms' in property_info.metadata and property_info.metadata['Bedrooms'] is not None and not isinstance(property_info.metadata['Bedrooms'], float):
+                         st.write(f"🛏️ Bedrooms: {property_info.metadata['Bedrooms']}")
+                     if 'Kitchen' in property_info.metadata and property_info.metadata['Kitchen'] is not None and not isinstance(property_info.metadata['Kitchen'], float):
+                         st.write(f"🍽️ Kitchen: {property_info.metadata['Kitchen']}")
+                     if 'Living room' in property_info.metadata and property_info.metadata['Living room'] is not None and not isinstance(property_info.metadata['Living room'], float):
+                         st.write(f"🛋️ Living room: {property_info.metadata['Living room']}")
+                     if 'Bathrooms' in property_info.metadata and property_info.metadata['Bathrooms'] is not None and not isinstance(property_info.metadata['Bathrooms'], float):
+                         st.write(f"🚽 Bathrooms: {property_info.metadata['Bathrooms']}")
+                     if 'Front width' in property_info.metadata and property_info.metadata['Front width'] is not None and not isinstance(property_info.metadata['Front width'], float):
+                         st.write(f"📐 Front width: {property_info.metadata['Front width']}")
+                     if 'Floor' in property_info.metadata and property_info.metadata['Floor'] is not None and not isinstance(property_info.metadata['Floor'], float):
+                         st.write(f"🧱 Floor: {property_info.metadata['Floor']}")
+                     if 'Parking Slot' in property_info.metadata and property_info.metadata['Parking Slot'] is not None and not isinstance(property_info.metadata['Parking Slot'], float):
+                         st.write(f"🚗 Parking Slot: {property_info.metadata['Parking Slot']}")
+                     if 'Seller name' in property_info.metadata and property_info.metadata['Seller name'] is not None and not isinstance(property_info.metadata['Seller name'], float):
+                         st.write(f"👤 Seller Name: {property_info.metadata['Seller name']}")
+                     if 'Seller type' in property_info.metadata and property_info.metadata['Seller type'] is not None and not isinstance(property_info.metadata['Seller type'], float):
+                         st.write(f"👨‍💼 Seller type: {property_info.metadata['Seller type']}")
+                     if 'Seller Address' in property_info.metadata and property_info.metadata['Seller Address'] is not None and not isinstance(property_info.metadata['Seller Address'], float):
+                         st.write(f"📌 Seller Address: {property_info.metadata['Seller Address']}")
+                     if 'Balcony Direction' in property_info.metadata and property_info.metadata['Balcony Direction'] is not None and not isinstance(property_info.metadata['Balcony Direction'], float):
+                         st.write(f"🌄 Balcony Direction: {property_info.metadata['Balcony Direction']}")
+                     if 'Furniture' in property_info.metadata and property_info.metadata['Furniture'] is not None and not isinstance(property_info.metadata['Furniture'], float):
+                         st.write(f"🛋️ Furniture: {property_info.metadata['Furniture']}")
+                     if 'Toilet' in property_info.metadata and property_info.metadata['Toilet'] is not None and not isinstance(property_info.metadata['Toilet'], float):
+                         st.write(f"🚽 Toilet: {property_info.metadata['Toilet']}")
+
+             with col4:
+                 st.empty()
+                 if 'Image URL' in property_info.metadata and property_info.metadata['Image URL'] is not None and not isinstance(property_info.metadata['Image URL'], float):
+                     imageCarouselComponent = components.declare_component("image-carousel-component", path="./frontend/public")
+                     image_path_urls = property_info.metadata['Image URL']
+                     if image_path_urls is not None and not isinstance(image_path_urls, float):
+                         # Convert the string to a Python list
+                         imageUrls = ast.literal_eval(image_path_urls)
+                         if len(imageUrls) > 1:
+                             selectedImageUrl = imageCarouselComponent(imageUrls=imageUrls, height=200)
+                             if selectedImageUrl is not None:
+                                 st.image(selectedImageUrl)
+
+             # Add a divider after displaying property info
+             st.markdown("<hr style='border: 2px solid white'>", unsafe_allow_html=True)  # Horizontal rule as a divider
+
+
+ def Search_Property():
+     global embeddings, index, result, QUESTION_PROMPT, qa
+
+     st.title("🏘️ Property Search ")
+     # Load data and create the search
+     if not session_state.database_loaded:
+         st.info("Loading database... This may take a moment.")
+         embeddings = SentenceTransformerEmbeddings(model_name="keepitreal/vietnamese-sbert")
+         # Create a Chroma object with persistence
+         db = Chroma(persist_directory="./chroma_index_1", embedding_function=embeddings)
+         # Get documents from the database
+         db.get()
+         llm = BardLLM()
+         qa = RetrievalQA.from_chain_type(
+             llm=llm,
+             chain_type="stuff",
+             retriever=db.as_retriever(search_type="similarity", search_kwargs={"k": 4}),
+             return_source_documents=True)
+         question_template = """
+         Context: You are a helpful and informative bot that answers questions posed below using provided context.\
+         You have to be truthful. Do not recommend or propose any information of the properties.\
+         Be sure to respond in a complete sentence, being comprehensive, including all information in the provided context.\
+         Imagine you're talking to a friend and use natural language and phrasing.\
+         You can only use Vietnamese; do not use other languages.
+
+         QUESTION: '{question}'
+
+         ANSWER:
+         """
+         QUESTION_PROMPT = PromptTemplate(
+             template=question_template, input_variables=["question"]
+         )
+         session_state.database_loaded = True
+
+     if session_state.database_loaded:
+         col1, col2 = st.columns([2, 1])  # Create a two-column layout
+
+         with col1:
+             query = st.text_input("Enter your property search query:")
+             search_button = st.button("Search", help="Click to start the search")
+
+         if search_button:
+             if not query:
+                 st.warning("Please input your query")
+             else:
+                 with st.spinner("Searching..."):
+                     if query is not None:  # Check if model_embedding is not None
+                         qa.combine_documents_chain.llm_chain.prompt = QUESTION_PROMPT
+                         qa.combine_documents_chain.verbose = True
+                         qa.return_source_documents = True
+                         results = qa({"query": query,})
+                         result = results["source_documents"]
+                         session_state.page_index = 0  # Reset page index when a new search is performed
+
+         with col2:
+             if len(result) > 0:
+                 st.info(f'Total Results: {len(result)} properties found.')  # Display "Total Results" in the second column
+
+         if result:
+             N = 5
+             prev_button, next_button = st.columns([4, 1])
+             last_page = len(result) // N
+
+
+             # Update page index based on button clicks
+             if prev_button.button("Previous", key="prev_button"):
+                 if session_state.page_index - 1 < 0:
+                     session_state.page_index = last_page
+                 else:
+                     session_state.page_index -= 1
+
+             if next_button.button("Next", key="next_button"):
+                 if session_state.page_index > last_page:
+                     st.warning("Displayed all results")
+                     session_state.page_index = 0
+                 else:
+                     session_state.page_index += 1
+
+             # Calculate the range of results to display (5 properties at a time)
+             start_idx = session_state.page_index * N
+             end_idx = (1 + session_state.page_index) * N
+
+             # Display results for the current page
+             display_search_results(result, start_idx, end_idx)