File size: 2,015 Bytes
7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 7c7f0f4 cdd85c7 f0a8fa5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
import logging
from huggingface_hub import InferenceClient
from typing import List, Dict
from config import HF_TOKEN, MODEL_NAME, SYSTEM_PROMPT
# Logging configuration: timestamped INFO-level messages on the root logger.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class Chatbot:
    """RAG-style chatbot backed by the Hugging Face Inference API.

    Maintains a running conversation history seeded with the system prompt
    and injects retrieved legal-text chunks as context into each question.
    """

    def __init__(self):
        # Requires a valid HF_TOKEN from config; constructing the client
        # performs no network I/O here — TODO confirm against huggingface_hub docs.
        logging.info("Inicjalizacja chatbota...")
        self.client = InferenceClient(api_key=HF_TOKEN)
        # History always starts with the system prompt so the model keeps its persona.
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]

    def generate_context(self, relevant_chunks: List[Dict]) -> str:
        """Concatenate retrieved chunks into a single context string.

        Args:
            relevant_chunks: dicts each carrying the chunk text under the
                ``"text"`` key; other keys are ignored.

        Returns:
            A Polish header line followed by every chunk's text, each
            terminated by a blank line.
        """
        # str.join builds the string in one pass instead of quadratic +=.
        context = "Kontekst z przepisów prawnych:\n\n" + "".join(
            f"{chunk['text']}\n\n" for chunk in relevant_chunks
        )
        logging.info("Wygenerowano kontekst dla chatbota.")
        return context

    def get_response(self, user_input: str, context: str) -> str:
        """Stream a model answer for *user_input* grounded in *context*.

        The context is sent to the model inside the current user message,
        but only the raw question and the final answer are appended to the
        conversation history, keeping it compact across turns.

        Args:
            user_input: the user's question.
            context: context string, e.g. from :meth:`generate_context`.

        Returns:
            The fully assembled answer text.
        """
        messages = self.conversation_history + [
            {"role": "user", "content": f"Kontekst: {context}\n\nPytanie: {user_input}"}
        ]
        response = ""
        logging.info("Generowanie odpowiedzi dla użytkownika: %s", user_input)
        stream = self.client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.5,
            max_tokens=8192,
            top_p=0.7,
            stream=True,
        )
        for chunk in stream:
            # Guard: streamed chunks can arrive with an empty choices list
            # (e.g. a final usage-only chunk) — skip instead of IndexError.
            if not chunk.choices:
                continue
            content = chunk.choices[0].delta.content
            if content:
                response += content
                logging.debug("Otrzymano fragment odpowiedzi: %s", content)
        self.conversation_history.append({"role": "user", "content": user_input})
        self.conversation_history.append({"role": "assistant", "content": response})
        logging.info("Zwrócono odpowiedź: %s", response)
        return response

    def clear_history(self) -> None:
        """Reset the conversation history, keeping only the system prompt."""
        logging.info("Czyszczenie historii konwersacji.")
        self.conversation_history = [
            {"role": "system", "content": SYSTEM_PROMPT}
        ]