Update app.py
Browse files
app.py
CHANGED
@@ -227,34 +227,34 @@ prompt_template = ChatPromptTemplate.from_messages([
|
|
227 |
|
228 |
|
229 |
|
230 |
-
def process_question(question: str):
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
250 |
-
|
251 |
-
|
252 |
-
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
|
259 |
# # Custom CSS for right-aligned text in textboxes
|
260 |
# custom_css = """
|
@@ -309,6 +309,8 @@ def process_question(question: str):
|
|
309 |
# show_error=True
|
310 |
# )
|
311 |
|
|
|
|
|
312 |
# CSS personnalisé avec l'importation de Google Fonts
|
313 |
custom_css = """
|
314 |
/* Import Google Fonts - Noto Sans Arabic */
|
@@ -415,7 +417,35 @@ button.primary-button:hover {
|
|
415 |
}
|
416 |
"""
|
417 |
|
418 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
419 |
with gr.Blocks(css=custom_css) as iface:
|
420 |
with gr.Column(elem_classes="container"):
|
421 |
gr.Markdown(
|
@@ -451,12 +481,13 @@ with gr.Blocks(css=custom_css) as iface:
|
|
451 |
variant="primary"
|
452 |
)
|
453 |
|
454 |
-
# Statut avec la nouvelle police
|
455 |
status_text = gr.Markdown("", elem_classes="rtl-text status-text")
|
456 |
|
457 |
def on_submit(question):
|
458 |
-
|
459 |
-
|
|
|
|
|
460 |
|
461 |
submit_btn.click(
|
462 |
fn=on_submit,
|
|
|
227 |
|
228 |
|
229 |
|
230 |
+
# def process_question(question: str):
|
231 |
+
# """
|
232 |
+
# Process the question and yield the answer progressively.
|
233 |
+
# """
|
234 |
+
# # Check cache first
|
235 |
+
# if question in question_cache:
|
236 |
+
# yield question_cache[question] # Retourne directement depuis le cache si disponible
|
237 |
+
|
238 |
+
# relevant_docs = retriever(question)
|
239 |
+
# context = "\n".join([doc.page_content for doc in relevant_docs])
|
240 |
+
|
241 |
+
# prompt = prompt_template.format_messages(
|
242 |
+
# context=context,
|
243 |
+
# question=question
|
244 |
+
# )
|
245 |
+
|
246 |
+
# response = "" # Initialise la réponse
|
247 |
+
# # Ici, nous supposons que 'llm.stream' est un générateur qui renvoie des chunks
|
248 |
+
# for chunk in llm.stream(prompt): # suppose que llm.stream renvoie des chunks de réponse
|
249 |
+
# if isinstance(chunk, str):
|
250 |
+
# response += chunk # Accumulez la réponse si c'est déjà une chaîne
|
251 |
+
# else:
|
252 |
+
# response += chunk.content # Sinon, prenez le contenu du chunk (si chunk est un type d'objet spécifique)
|
253 |
+
|
254 |
+
# yield response, context # Renvoie la réponse mise à jour et le contexte
|
255 |
+
|
256 |
+
# # Mettez le résultat en cache à la fin
|
257 |
+
# question_cache[question] = (response, context)
|
258 |
|
259 |
# # Custom CSS for right-aligned text in textboxes
|
260 |
# custom_css = """
|
|
|
309 |
# show_error=True
|
310 |
# )
|
311 |
|
312 |
+
|
313 |
+
|
314 |
# CSS personnalisé avec l'importation de Google Fonts
|
315 |
custom_css = """
|
316 |
/* Import Google Fonts - Noto Sans Arabic */
|
|
|
417 |
}
|
418 |
"""
|
419 |
|
420 |
+
def process_question(question: str):
    """
    Answer a question through the RAG pipeline.

    Retrieves relevant documents, formats the chat prompt, accumulates the
    streamed LLM completion, and memoizes the result keyed by the question.

    Args:
        question: The user's question text (also the cache key).

    Returns:
        tuple[str, str, str]: (answer, retrieved context, status). The status
        element is always "" here; the UI callback supplies the real status.
    """
    # Cache hit: return the stored (answer, context) pair. Both paths must
    # return THREE values — the caller unpacks `result, context, _`, so a
    # two-element return here would raise ValueError on repeated questions.
    if question in question_cache:
        answer, context = question_cache[question]
        return answer, context, ""

    relevant_docs = retriever(question)
    context = "\n".join(doc.page_content for doc in relevant_docs)

    prompt = prompt_template.format_messages(
        context=context,
        question=question,
    )

    # llm.stream may yield plain strings or message chunks carrying .content;
    # accumulate either form into the final answer.
    response = ""
    for chunk in llm.stream(prompt):
        response += chunk if isinstance(chunk, str) else chunk.content

    # Cache answer AND context so a cache hit can reproduce the full result.
    question_cache[question] = (response, context)

    return response, context, ""
|
447 |
+
|
448 |
+
# Interface Gradio avec la correction
|
449 |
with gr.Blocks(css=custom_css) as iface:
|
450 |
with gr.Column(elem_classes="container"):
|
451 |
gr.Markdown(
|
|
|
481 |
variant="primary"
|
482 |
)
|
483 |
|
|
|
484 |
status_text = gr.Markdown("", elem_classes="rtl-text status-text")
|
485 |
|
486 |
def on_submit(question):
|
487 |
+
# Retourne trois valeurs : réponse, contexte et statut
|
488 |
+
result, context, _ = process_question(question)
|
489 |
+
status = "تمت معالجة السؤال بنجاح" # "Question processed successfully"
|
490 |
+
return result, context, status
|
491 |
|
492 |
submit_btn.click(
|
493 |
fn=on_submit,
|