Update app.py
app.py CHANGED
@@ -311,28 +311,57 @@ prompt_template = ChatPromptTemplate.from_messages([
 
 
 
+# def process_question(question: str):
+#     """
+#     Process the question and return the answer and context
+#     """
+#     # Check cache first
+#     if question in question_cache:
+#         return question_cache[question], ""  # Return the cached answer and an empty status
+#     relevant_docs = retriever(question)
+#     context = "\n".join([doc.page_content for doc in relevant_docs])
+#     prompt = prompt_template.format_messages(
+#         context=context,
+#         question=question
+#     )
+#     response = ""
+#     for chunk in llm.stream(prompt):
+#         if isinstance(chunk, str):
+#             response += chunk
+#         else:
+#             response += chunk.content
+#     # Cache the result at the end
+#     question_cache[question] = (response, context)
+#     return response, context
+
 def process_question(question: str):
     """
-    Process the question and return the answer and context
+    Process the question and yield the answer progressively.
     """
     # Check cache first
     if question in question_cache:
-        return question_cache[question], ""  # Return the cached answer and an empty status
+        yield question_cache[question]  # Return directly from the cache when available
+
     relevant_docs = retriever(question)
     context = "\n".join([doc.page_content for doc in relevant_docs])
+
     prompt = prompt_template.format_messages(
         context=context,
         question=question
-    )
-    response = ""
-    for chunk in llm.stream(prompt):
+    )
+
+    response = ""  # Initialize the response
+    # Here we assume that 'llm.stream' is a generator that yields chunks
+    for chunk in llm.stream(prompt):  # assumes llm.stream yields response chunks
         if isinstance(chunk, str):
-            response += chunk
+            response += chunk  # Accumulate the response if it is already a string
         else:
-            response += chunk.content
+            response += chunk.content  # Otherwise take the chunk's content (if chunk is a specific object type)
+
+        yield response, context  # Yield the updated response and the context
+
     # Cache the result at the end
-    question_cache[question] = (response, context)
-    return response, context
+    question_cache[question] = (response, context)
 
 # Custom CSS with Google Fonts import
 custom_css = """
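For context on the isinstance(chunk, str) branch above: in LangChain, streaming from a plain LLM yields strings, while streaming from a chat model yields message chunks whose text lives in chunk.content. A minimal sketch of the pattern, assuming a LangChain chat model (ChatOpenAI and the template text are illustrative stand-ins, not the actual setup in app.py):

# Minimal sketch of the streaming pieces used in the diff; not app.py's real config.
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI  # assumption: any LangChain chat model works here

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "Answer using only this context:\n{context}"),  # illustrative template
    ("human", "{question}"),
])
llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model choice

prompt = prompt_template.format_messages(context="retrieved text", question="What is RAG?")
response = ""
for chunk in llm.stream(prompt):
    # Chat models yield AIMessageChunk objects; plain LLMs yield str.
    response += chunk if isinstance(chunk, str) else chunk.content
print(response)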
@@ -479,17 +508,6 @@ with gr.Blocks(css=custom_css) as iface:
 )
 
 
-# def on_submit(question):
-#     response, context = process_question(question)
-#     return response, context
-
-# submit_btn.click(
-#     fn=on_submit,
-#     inputs=input_text,
-#     outputs=[answer_box, context_box],
-#     api_name="predict",
-#     queue=True
-# )
 def on_submit(question):
     response, context = "", ""
     for chunk in process_question(question):
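Gradio supports generator event handlers: each yield re-renders the bound outputs, which is what lets the new on_submit stream the answer progressively. A self-contained sketch of that wiring, with a hypothetical fake_stream standing in for process_question (the widget names mirror those referenced in the diff):

# Standalone sketch of streaming a generator into Gradio outputs; fake_stream
# is a stand-in for process_question, not code from app.py.
import time
import gradio as gr

def fake_stream(question):
    response = ""
    for word in f"Echoing: {question}".split():
        time.sleep(0.1)                 # simulate per-token latency
        response += word + " "
        yield response, "demo context"  # (answer so far, context)

def on_submit(question):
    for response, context in fake_stream(question):
        yield response, context         # each yield updates both output boxes

with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Question")
    answer_box = gr.Textbox(label="Answer")
    context_box = gr.Textbox(label="Context")
    submit_btn = gr.Button("Submit")
    submit_btn.click(fn=on_submit, inputs=input_text,
                     outputs=[answer_box, context_box])

if __name__ == "__main__":
    demo.launch()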
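One consequence of the rewrite worth noting: process_question now always yields (response, context) tuples, whether the answer comes from llm.stream or straight from question_cache, so callers can treat both paths identically. An illustrative consumption loop (the question string is made up):

# Identifiers are from the diff; only the question text is invented.
final_response, final_context = "", ""
for response, context in process_question("What does this app do?"):
    final_response, final_context = response, context  # keep the latest snapshot
print(final_response)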