# app.py

import streamlit as st

# Local imports (hypothetical sketches of these helper modules appear at the end of this file)
from embedding import load_embeddings
from vectorstore import load_or_build_vectorstore
from chain_setup import build_conversational_chain

def main():
    # Page title and header (Arabic): "Interactive Chat - Data Management and Personal Data Protection"
    st.set_page_config(page_title="المحادثة التفاعلية", layout="wide")
    st.title("💬 المحادثة التفاعلية - ادارة البيانات وحماية البيانات الشخصية")

    # Add custom CSS to support right-to-left text
    st.markdown(
        """
        <style>
            .rtl {
                direction: rtl;
                text-align: right;
            }
        </style>
        """,
        unsafe_allow_html=True,
    )

    # Paths and constants
    local_file = "Policies001.pdf"
    index_folder = "faiss_index"

    # Step 1: Load Embeddings
    embeddings = load_embeddings()

    # Step 2: Build or load VectorStore
    vectorstore = load_or_build_vectorstore(local_file, index_folder, embeddings)

    # Step 3: Build the Conversational Retrieval Chain
    qa_chain = build_conversational_chain(vectorstore)

    # Step 4: Session State for UI Chat
    if "messages" not in st.session_state:
        st.session_state["messages"] = [
            {"role": "assistant", "content": "👋 مرحبًا! اسألني أي شيء عن إدارة البيانات وحماية البيانات الشخصية"}
        ]

    # Display existing messages
    for msg in st.session_state["messages"]:
        with st.chat_message(msg["role"]):
            # Apply 'rtl' class for RTL text direction
            st.markdown(f'<div class="rtl">{msg["content"]}</div>', unsafe_allow_html=True)

    # Step 5: Chat Input
    # Chat input placeholder (Arabic): "Type your question"
    user_input = st.chat_input("اكتب سؤالك")

    # Step 6: Process user input
    if user_input:
        # a) Display user message
        st.session_state["messages"].append({"role": "user", "content": user_input})
        with st.chat_message("user"):
            st.markdown(f'<div class="rtl">{user_input}</div>', unsafe_allow_html=True)

        # b) Run chain (the chain is expected to track chat history internally,
        #    so only the new question is passed in)
        response_dict = qa_chain({"question": user_input})
        answer = response_dict["answer"]

        # c) Display assistant response
        st.session_state["messages"].append({"role": "assistant", "content": answer})
        with st.chat_message("assistant"):
            st.markdown(f'<div class="rtl">{answer}</div>', unsafe_allow_html=True)

if __name__ == "__main__":
    main()
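
# ----------------------------------------------------------------------------
# The three helper modules imported above are not shown on this page. The
# sketches below are hypothetical reconstructions based only on how app.py
# calls them; the actual implementations may differ.
# ----------------------------------------------------------------------------

# --- embedding.py (hypothetical sketch) ---
# load_embeddings() must return a LangChain embeddings object. The multilingual
# sentence-transformers model below is an assumption, chosen only because the
# app handles Arabic text.

from langchain_community.embeddings import HuggingFaceEmbeddings

def load_embeddings():
    return HuggingFaceEmbeddings(
        model_name="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
    )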
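
# --- vectorstore.py (hypothetical sketch) ---
# load_or_build_vectorstore() reuses a saved FAISS index when the index folder
# already exists; otherwise it loads the PDF, splits it into chunks, embeds
# them, and saves a new index. The loader, chunk sizes, and FAISS wrapper are
# assumptions.

import os
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

def load_or_build_vectorstore(pdf_path, index_folder, embeddings):
    if os.path.exists(index_folder):
        # Recent langchain_community versions require opting in to pickle
        # deserialization when loading a locally saved index.
        return FAISS.load_local(
            index_folder, embeddings, allow_dangerous_deserialization=True
        )
    # First run: read the PDF, split it into overlapping chunks, and index them.
    docs = PyPDFLoader(pdf_path).load()
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100
    ).split_documents(docs)
    vectorstore = FAISS.from_documents(chunks, embeddings)
    vectorstore.save_local(index_folder)
    return vectorstore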
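
# --- chain_setup.py (hypothetical sketch) ---
# build_conversational_chain() wires the retriever, an LLM, and conversation
# memory into a ConversationalRetrievalChain. Because app.py calls the chain
# with only {"question": ...}, the chain must manage chat history itself,
# hence the ConversationBufferMemory. The ChatOpenAI model and retriever
# settings are assumptions.

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI

def build_conversational_chain(vectorstore):
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        output_key="answer",
    )
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
        memory=memory,
    )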