# prawnikai/chatbot.py
import logging
from huggingface_hub import InferenceClient
from typing import List, Dict
from config import HF_TOKEN, MODEL_NAME, SYSTEM_PROMPT
# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class Chatbot:
    def __init__(self):
        logging.info("Initializing chatbot...")
        self.client = InferenceClient(api_key=HF_TOKEN)
        # The conversation always starts with the system prompt.
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]

    def generate_context(self, relevant_chunks: List[Dict]) -> str:
        # Concatenate the retrieved legal-text chunks into a single context block.
        context = "Kontekst z przepisów prawnych:\n\n"
        for chunk in relevant_chunks:
            context += f"{chunk['text']}\n\n"
        logging.info("Generated context for the chatbot.")
        return context

    def get_response(self, user_input: str, context: str) -> str:
        # Append the context-augmented question to the conversation history.
        messages = self.conversation_history + [
            {"role": "user", "content": f"Kontekst: {context}\n\nPytanie: {user_input}"}
        ]
        response = ""
        logging.info("Generating a response for user input: %s", user_input)
        stream = self.client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.5,
            max_tokens=8192,
            top_p=0.7,
            stream=True
        )
        # Accumulate the streamed response fragments into the full answer.
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                response += content
                logging.debug("Received response fragment: %s", content)
        self.conversation_history.append({"role": "user", "content": user_input})
        self.conversation_history.append({"role": "assistant", "content": response})
        logging.info("Returning response: %s", response)
        return response

    def clear_history(self):
        logging.info("Clearing the conversation history.")
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]