import json
import logging
import os
import re
from typing import List, Dict, Tuple

import chromadb
from chromadb.utils import embedding_functions

from config import EMBEDDING_MODEL, DATABASE_DIR
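
# config.py is assumed to define EMBEDDING_MODEL (the name of a SentenceTransformer
# model, presumably a multilingual one suited to Polish legal text) and DATABASE_DIR
# (the directory used for the persistent Chroma store); its exact contents are not
# shown here.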

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class KodeksProcessor:
    def __init__(self):
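        """Open (or create) the persistent Chroma store and the 'kodeksy' collection.

        Embeddings are produced with the SentenceTransformer model named in
        config.EMBEDDING_MODEL.
        """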
        logger.info(f"Initializing database client in directory: {DATABASE_DIR}")
        if not os.path.exists(DATABASE_DIR):
            os.makedirs(DATABASE_DIR)
            logger.info(f"Created directory {DATABASE_DIR}")

        self.client = chromadb.PersistentClient(path=DATABASE_DIR)
        logger.info("Database client initialized")

        try:
            self.collection = self.client.get_or_create_collection(
                name="kodeksy",
                embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
                    model_name=EMBEDDING_MODEL
                )
            )
            logger.info("Collection 'kodeksy' retrieved or created")
        except Exception as e:
            logger.error(f"Error while retrieving or creating the collection: {e}")
            raise

    def extract_metadata(self, text: str) -> Dict:
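        """Pull basic metadata out of the raw act text.

        Collected fields: the Dz.U. (Journal of Laws) reference and year, the act's
        date and title, and, when present, the amendment history ('historia_zmian'),
        serialized to a JSON string so it fits Chroma's scalar metadata model.
        """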
        metadata = {}

        # Journal of Laws reference in the form Dz.U.<year>.<number>.<position>
        dz_u_match = re.search(r'Dz\.U\.(\d{4})\.(\d+)\.(\d+)', text)
        if dz_u_match:
            metadata['dz_u'] = f"Dz.U.{dz_u_match.group(1)}.{dz_u_match.group(2)}.{dz_u_match.group(3)}"
            metadata['rok'] = dz_u_match.group(1)

        # Act date and title: the two lines following "USTAWA z dnia"
        nazwa_match = re.search(r'USTAWA\s+z dnia(.*?)\n(.*?)\n', text)
        if nazwa_match:
            metadata['data_ustawy'] = nazwa_match.group(1).strip()
            metadata['nazwa'] = nazwa_match.group(2).strip()

        # Amendment history; stored as a JSON string because Chroma accepts only
        # scalar metadata values (str, int, float, bool).
        zmiany = re.findall(
            r'(\d{4}-\d{2}-\d{2})\s+(zm\.\s+DZ\.U\.(\d{4})\.(\d+)\.(\d+)\s+art\.\s+(\d+)(?:\s+§\s+(\d+))?)',
            text
        )
        if zmiany:
            metadata['historia_zmian'] = json.dumps([
                {
                    'data': data,
                    'dz_u': f"Dz.U.{rok}.{numer}.{pozycja}",
                    'artykul': artykul,
                    'paragraf': paragraf if paragraf else None
                }
                for data, _, rok, numer, pozycja, artykul, paragraf in zmiany
            ], ensure_ascii=False)

        logger.debug(f"Extracted metadata: {metadata}")
        return metadata

    def split_header_and_content(self, text: str) -> Tuple[str, str]:
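        """Split the raw text into (header, content) at the first occurrence of "USTAWA".

        If the marker is missing, the whole text is returned as content with an
        empty header.
        """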
        parts = text.split("USTAWA", 1)
        if len(parts) > 1:
            return parts[0], "USTAWA" + parts[1]
        return "", text

    def process_article(self, article_text: str) -> Dict:
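        """Parse a single article into its number and, where present, its § paragraphs.

        Returns a dict with 'article_num', a 'has_paragraphs' flag, and either the
        raw 'content' or a list of (paragraph number, paragraph text) tuples.
        """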
        art_num_match = re.match(r'Art\.\s*(\d+[a-z]?)', article_text)
        article_num = art_num_match.group(1) if art_num_match else ""

        paragraphs = re.findall(r'§\s*(\d+)\.\s*(.*?)(?=§\s*\d+|Art\.\s*\d+|$)', article_text, re.DOTALL)

        if not paragraphs:
            return {
                "article_num": article_num,
                "content": article_text.strip(),
                "has_paragraphs": False
            }

        return {
            "article_num": article_num,
            "paragraphs": paragraphs,
            "has_paragraphs": True
        }

    def split_into_chunks(self, text: str, metadata: Dict) -> List[Dict]:
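        """Split an act into retrieval chunks, one per article or per § paragraph.

        Each chunk carries the act-level metadata plus the article (and, where
        applicable, paragraph) number it came from.
        """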
        chunks = []
        # re.split with a capturing group keeps the "Art. N" headers at odd indices
        articles = re.split(r'(Art\.\s*\d+[a-z]?)', text)

        for i in range(1, len(articles), 2):
            article_title = articles[i].strip()
            article_content = articles[i + 1].strip() if i + 1 < len(articles) else ""

            processed_article = self.process_article(article_title + " " + article_content)

            chunk_metadata = {
                **metadata,
                "article": processed_article["article_num"]
            }

            if processed_article["has_paragraphs"]:
                for par_num, par_content in processed_article["paragraphs"]:
                    chunk = {
                        "text": f"{article_title} §{par_num}. {par_content.strip()}",
                        "metadata": {**chunk_metadata, "paragraph": par_num}
                    }
                    chunks.append(chunk)
                    logger.debug(f"Created chunk: {chunk['text'][:100]}...")
            else:
                chunk = {
                    "text": processed_article["content"],
                    "metadata": chunk_metadata
                }
                chunks.append(chunk)
                logger.debug(f"Created chunk: {chunk['text'][:100]}...")

        logger.debug(f"Split the text into {len(chunks)} chunks.")
        return chunks

    def process_file(self, filepath: str) -> None:
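        """Read a single .txt act, chunk it, and add the chunks to the collection.

        Chunk ids are built as '<filename>_<article>_<index>'.
        """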
        logger.info(f"Started processing file: {filepath}")
        try:
            with open(filepath, 'r', encoding='utf-8') as file:
                content = file.read()
            logger.info(f"Read the contents of file: {filepath}")

            header, main_content = self.split_header_and_content(content)
            metadata = self.extract_metadata(main_content)
            metadata['filename'] = os.path.basename(filepath)

            chunks = self.split_into_chunks(main_content, metadata)
            logger.info(f"Split the file into {len(chunks)} chunks")

            if chunks:
                self.collection.add(
                    documents=[chunk["text"] for chunk in chunks],
                    metadatas=[chunk["metadata"] for chunk in chunks],
                    ids=[f"{metadata['filename']}_{chunk['metadata']['article']}_{i}" for i, chunk in enumerate(chunks)]
                )
                logger.info(f"Added {len(chunks)} chunks to the collection from file {metadata['filename']}")
            else:
                logger.warning(f"No chunks to add from file: {filepath}")
        except Exception as e:
            logger.error(f"Error while processing file {filepath}: {e}")

    def process_all_files(self, directory: str) -> None:
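        """Process every .txt file in the given directory, one file at a time."""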
        logger.info(f"Started processing all files in directory: {directory}")
        if not os.path.exists(directory):
            logger.error(f"Directory {directory} does not exist!")
            return
        try:
            files = [f for f in os.listdir(directory) if f.endswith('.txt')]
            logger.info(f"Found {len(files)} .txt files")
            for filename in files:
                filepath = os.path.join(directory, filename)
                logger.info(f"Processing file: {filepath}")
                self.process_file(filepath)
            logger.info("Finished processing files.")
        except Exception as e:
            logger.error(f"Error while processing files: {e}")

    def verify_data_loading(self):
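        """Log the collection size and warn if nothing was loaded."""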
        count = self.collection.count()
        logger.info(f"Total number of documents in the collection: {count}")
        if count == 0:
            logger.warning("No documents were loaded into the database.")

    def test_search(self):
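        """Run a few sample queries against the collection and log result counts."""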
        test_queries = ["kodeks karny", "art. 1", "przestępstwo"]
        for query in test_queries:
            results = self.search(query)
            logger.info(f"Test query '{query}' returned {len(results['documents'][0])} results")

    def search(self, query: str, n_results: int = 3) -> Dict:
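        """Query the collection and return the raw Chroma result dict.

        On failure, an empty result structure is returned instead of raising.
        """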
        logger.info(f"Searching the database for query: {query}")
        try:
            results = self.collection.query(
                query_texts=[query],
                n_results=n_results
            )
            logger.info(f"Found {len(results['documents'][0])} results for query: {query}")
            return results
        except Exception as e:
            logger.error(f"Error during search: {e}")
            return {"documents": [[]], "metadatas": [[]], "distances": [[]]}

    def list_all_documents(self) -> None:
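        """Log the metadata of every document currently stored in the collection."""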
        try:
            all_docs = self.collection.get(include=['metadatas'])
            if all_docs['metadatas']:
                for metadata in all_docs['metadatas']:
                    logger.info(f"Document: {metadata}")
            else:
                logger.info("No documents in the database.")
        except Exception as e:
            logger.error(f"Error while listing documents: {e}")


if __name__ == "__main__":
    processor = KodeksProcessor()
    processor.process_all_files("data/kodeksy")
    processor.verify_data_loading()
    processor.test_search()
    processor.list_all_documents()