Spaces:
Sleeping
Sleeping
added introductory prompt
Browse files- backend.py +55 -53
backend.py
CHANGED
@@ -69,14 +69,44 @@ def build_index(path: str):
|
|
69 |
return index
|
70 |
|
71 |
|
72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
def handle_query(query_str: str,
|
74 |
chat_history: list[tuple[str, str]],
|
75 |
session: dict[str, Any]) -> Iterator[str]:
|
|
|
76 |
|
77 |
-
global
|
|
|
78 |
|
79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
80 |
matched_path = None
|
81 |
words = query_str.lower()
|
82 |
for key, path in documents_paths.items():
|
@@ -84,67 +114,39 @@ def handle_query(query_str: str,
|
|
84 |
matched_path = path
|
85 |
break
|
86 |
if matched_path:
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
for user, assistant in chat_history:
|
93 |
-
conversation.extend(
|
94 |
-
[
|
95 |
-
ChatMessage(role=MessageRole.USER, content=user),
|
96 |
-
|
97 |
-
ChatMessage(role=MessageRole.ASSISTANT, content=assistant),
|
98 |
-
]
|
99 |
-
)
|
100 |
-
index = build_index("data/chiarimento.txt")
|
101 |
-
|
102 |
-
else:
|
103 |
-
# The index is already built, no need to rebuild it.
|
104 |
-
conversation: List[ChatMessage] = []
|
105 |
-
for user, assistant in chat_history:
|
106 |
-
conversation.extend(
|
107 |
-
[
|
108 |
-
ChatMessage(role=MessageRole.USER, content=user),
|
109 |
-
|
110 |
-
ChatMessage(role=MessageRole.ASSISTANT, content=assistant),
|
111 |
-
]
|
112 |
-
)
|
113 |
-
#conversation.append( ChatMessage(role=MessageRole.USER, content=query_str))
|
114 |
-
#pass
|
115 |
|
116 |
try:
|
117 |
-
|
118 |
memory = ChatMemoryBuffer.from_defaults(token_limit=None)
|
119 |
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
),
|
134 |
-
verbose=False,
|
135 |
)
|
136 |
|
|
|
137 |
|
138 |
outputs = []
|
139 |
-
response = chat_engine.stream_chat(query_str, conversation)
|
140 |
-
#response = chat_engine.chat(query_str)
|
141 |
for token in response.response_gen:
|
142 |
-
#if not token.startswith("system:") and not token.startswith("user:"):
|
143 |
-
|
144 |
outputs.append(token)
|
145 |
-
#print(f"Generated token: {token}")
|
146 |
yield "".join(outputs)
|
147 |
-
|
|
|
|
|
148 |
|
149 |
except Exception as e:
|
150 |
yield f"Error processing query: {str(e)}"
|
|
|
69 |
return index
|
70 |
|
71 |
|
72 |
# --- Module-level state shared by every handle_query invocation ---
global_index = None
global_session_state = {}


def initialize_global_state():
    """Reset the shared index and session-state dict to a clean slate."""
    global global_index, global_session_state
    global_index = None
    global_session_state = dict(
        documents_loaded=False,   # set True once an index for a matched document is built
        document_db=None,
        original_message=None,
        clarification=False,      # set True when falling back to the clarification doc
        conversation=[],          # accumulated ChatMessage history
    )


# Initialize the shared state as soon as the module is imported.
initialize_global_state()
|
89 |
+
|
90 |
@spaces.GPU(duration=30)
def handle_query(query_str: str,
                 chat_history: list[tuple[str, str]],
                 session: dict[str, Any]) -> Iterator[str]:
    """Stream an answer to *query_str* from a RAG chat engine.

    Parameters
    ----------
    query_str : the user's current question.
    chat_history : prior (user, assistant) message pairs for this chat.
    session : mutable per-session state; merged into the module-level
        ``global_session_state`` on entry and synced back after a
        successful generation.

    Yields
    ------
    Progressively longer prefixes of the generated answer (one per
    streamed token), or a single error string if generation fails.
    """
    global global_index, global_session_state

    # Merge any new information from the caller-supplied session.
    global_session_state.update(session)

    # BUG FIX: the previous code *extended* the stored conversation with the
    # full chat_history on every call, so old turns were re-appended and the
    # list grew with duplicates without bound. Rebuild it from scratch from
    # the authoritative chat_history instead.
    conversation = []
    for user, assistant in chat_history:
        conversation.append(ChatMessage(role=MessageRole.USER, content=user))
        conversation.append(ChatMessage(role=MessageRole.ASSISTANT, content=assistant))
    # Add the current query as the final user turn.
    conversation.append(ChatMessage(role=MessageRole.USER, content=query_str))
    global_session_state["conversation"] = conversation

    # Lazily build the index on first use: pick the document whose key
    # appears in the query; otherwise fall back to the clarification doc.
    if global_index is None:
        matched_path = None
        query_lower = query_str.lower()
        for key, path in documents_paths.items():
            # Substring match of the document key against the lowercased query.
            if key in query_lower:
                matched_path = path
                break
        if matched_path:
            global_index = build_index(matched_path)
            global_session_state["documents_loaded"] = True
        else:
            global_index = build_index("data/chiarimento.txt")
            global_session_state["clarification"] = True

    try:
        # token_limit=None lets the buffer use the library default/unbounded limit.
        memory = ChatMemoryBuffer.from_defaults(token_limit=None)

        chat_engine = global_index.as_chat_engine(
            chat_mode="condense_plus_context",
            memory=memory,
            similarity_top_k=4,
            response_mode="tree_summarize",
            context_prompt=(
                "Sei un assistente Q&A italiano di nome Odi, che risponde solo alle domande o richieste pertinenti in modo preciso."
                " Quando un utente ti chiede informazioni su di te o sul tuo creatore puoi dire che sei un assistente ricercatore creato dagli Osservatori Digitali e fornire gli argomenti di cui sei esperto."
                " Ecco i documenti rilevanti per il contesto:\n"
                "{context_str}"
                "\nIstruzione: Usa la cronologia delle chat precedenti, o il contesto sopra, per interagire e aiutare l'utente a rispondere alla sua domanda."
            ),
            verbose=False,
        )

        # Stream tokens, yielding the cumulative answer so the UI updates live.
        response = chat_engine.stream_chat(query_str, global_session_state["conversation"])
        outputs = []
        for token in response.response_gen:
            outputs.append(token)
            yield "".join(outputs)

        # Sync any state changes back to the caller's session dict.
        # NOTE(review): this only runs on success; on error the caller's
        # session is left untouched — confirm that is the intended contract.
        session.update(global_session_state)

    except Exception as e:
        yield f"Error processing query: {str(e)}"