File size: 6,292 Bytes
1ca7291
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import os
import json
from bs4 import BeautifulSoup
import ebooklib
from ebooklib import epub
import re
import xml.etree.ElementTree as ET

folder_path = './books'  # Replace with your folder path
output_file_pattern = './output/output_part_{}.jsonl'  # Pattern for output files; {} is the 1-based part number
part_size = 1  # Number of files per part
part_counter = 0  # index of the current output part (incremented before each open)
file_counter = 0  # number of EPUBs successfully written so far (drives part rotation)

def correct_french_punctuation(text):
    """Normalize spacing around French punctuation in *text*.

    NOTE: standard French typography puts a (narrow no-break) space
    *before* ?!:; — this pipeline deliberately strips it instead, and
    guarantees exactly one space *after* those marks. Hyphens and
    en-dashes are tightened to a bare ASCII hyphen with no surrounding
    whitespace.
    """
    # Remove any whitespace before ?, !, : or ;
    text = re.sub(r'\s+([?!:;])', r'\1', text)
    # Collapse whitespace after those marks to exactly one space.
    text = re.sub(r'([?!:;])\s*', r'\1 ', text)
    # Tighten hyphens and en-dashes (– is normalized to -).
    text = re.sub(r'\s*-\s*', '-', text)
    text = re.sub(r'\s*–\s*', '-', text)
    # Bug fix: the second substitution appended a space when the text
    # ended with punctuation; drop trailing spaces so paragraphs stay clean.
    return text.rstrip(' ')

def find_navpoint_2_in_toc(book):
    """Return the navLabel text of the navPoint with id 'navpoint-2'.

    Looks up the NCX table of contents ('ncx' item) of *book* and returns
    the label string of navPoint 'navpoint-2', or None when the TOC is
    missing, the navPoint is absent, or it has no label text.
    """
    toc_item = book.get_item_with_id('ncx')
    if toc_item is None:
        return None
    toc_content = toc_item.get_content()
    namespaces = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
    toc_root = ET.fromstring(toc_content)

    nav_points = toc_root.findall('.//ncx:navPoint', namespaces)
    for nav_point in nav_points:
        if nav_point.attrib.get('id') == 'navpoint-2':
            # Bug fix: nav_point.text is only the whitespace between child
            # tags (or None), never the label — the original therefore
            # always returned None. Read the navLabel/text element instead.
            label = nav_point.find('.//ncx:navLabel/ncx:text', namespaces)
            return label.text if label is not None else None
    return None

def find_section_href_in_toc(book, section_title):
    """Look up *section_title* in the NCX table of contents of *book*.

    Returns the 'src' attribute of the matching navPoint's <content>
    element, or None when the TOC item is missing or no navLabel text
    equals *section_title* exactly.
    """
    ns = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
    toc_item = book.get_item_with_id('ncx')
    if toc_item is None:
        return None
    root = ET.fromstring(toc_item.get_content())
    for point in root.findall('.//ncx:navPoint', ns):
        labels = point.findall('.//ncx:navLabel/ncx:text', ns)
        if any(label.text == section_title for label in labels):
            content = point.find('.//ncx:content', ns)
            if content is not None:
                return content.attrib['src']
    return None

def extract_content_from_epub(book):
    """Extract the readable body text of an EPUB *book* as one string.

    Uses the NCX table of contents to locate a start section ("Avant
    propos" or "Premier Chapitre") and a back-matter end section, then
    concatenates the <p> paragraphs of every document item in between,
    normalizing French punctuation per paragraph. Extraction stops early
    when a known end-of-book sentinel phrase appears. If nothing was
    extracted, falls back to dumping the raw text of every document item.
    """
    text = ''
    # Start href: foreword first, else first chapter; None means "no known start".
    start_section = find_section_href_in_toc(book, "Avant propos") or find_section_href_in_toc(book, "Premier Chapitre")
    # Two known back-matter sections that mark where extraction should stop.
    end_section_1 = find_section_href_in_toc(book, "À propos de cette édition électronique")
    end_section_2 = find_section_href_in_toc(book, "Bibliographie – Œuvres complètes")

    # Determine the final end section
    # NOTE(review): when both exist, the "smaller" href wins by lexicographic
    # string comparison — presumably hrefs sort in reading order; confirm.
    if end_section_1 is not None and end_section_2 is not None:
        end_section = end_section_1 if end_section_1 < end_section_2 else end_section_2
    elif end_section_1 is not None:
        end_section = end_section_1
    else:
        end_section = end_section_2  # may still be None (no end marker at all)

    extracting = start_section is None  # Start extracting if no specific start section

    for item in book.get_items():
        if item.get_type() == ebooklib.ITEM_DOCUMENT:
            item_id = item.get_name()
            # Substring match: the TOC href may carry a fragment or path prefix
            # relative to the item name.
            if start_section and start_section in item_id:
                extracting = True
            # Stop *before* processing the end-marker document.
            if end_section and end_section in item_id:
                break
            # 'or not start_section' is redundant with the initializer above;
            # kept byte-identical here.
            if extracting or not start_section:
                try:
                    soup = BeautifulSoup(item.get_content(), 'html.parser')
                    for p in soup.find_all('p'):  # Process paragraph by paragraph
                        paragraph = p.get_text(separator='\n')
                        paragraph = paragraph.replace(u'\xa0', ' ')  # non-breaking space -> plain space
                        paragraph = correct_french_punctuation(paragraph)
                        text += paragraph + '\n'
                        # Check for end phrases after each paragraph.
                        # NOTE(review): the split runs over the whole accumulated
                        # text, so an earlier occurrence of the sentinel (e.g.
                        # "FIN" inside a word) would truncate earlier content.
                        if "FIN" in paragraph:
                            text = text.split("FIN", 1)[0]
                            print("End of book reached")
                            return text
                        elif "la Bibliothèque électronique du Québec" in paragraph:
                            text = text.split("la Bibliothèque électronique du Québec", 1)[0]
                            print("End of book reached")
                            return text
                        elif "ouvrage est le" in paragraph:
                            text = text.split("ouvrage est le", 1)[0]
                            print("End of book reached")
                            return text
                except Exception as e:
                    # Best-effort: a malformed document item is reported and skipped.
                    print(f"Error processing content: {e}")

    if not text:
        # Nothing matched the TOC-driven pass: dump every document item raw.
        print("Fallback: Adding all text as no specific sections were found.")
        for item in book.get_items():
            if item.get_type() == ebooklib.ITEM_DOCUMENT:
                try:
                    soup = BeautifulSoup(item.get_content(), 'html.parser')
                    text += soup.get_text(separator='\n').replace(u'\xa0', ' ') + '\n'
                except Exception as e:
                    print(f"Error in fallback processing: {e}")

    return text




def extract_metadata_from_epub(book):
    """Collect Dublin Core metadata from *book*.

    Returns a dict with 'title', 'author' and 'publisher' keys, each
    holding whatever book.get_metadata('DC', ...) yields. On error the
    dict is returned partially filled and the error is printed.
    """
    metadata = {}
    try:
        for key, dc_field in (('title', 'title'),
                              ('author', 'creator'),
                              ('publisher', 'publisher')):
            metadata[key] = book.get_metadata('DC', dc_field)
        # Add more metadata fields if needed
    except Exception as e:
        print(f"Error extracting metadata: {e}")
    return metadata

# --- Main driver: convert every .epub in folder_path into JSONL parts. ---
# A new output part is opened every `part_size` successfully written books;
# each line is a JSON object {"text": ..., "meta": ...}.
jsonl_file = None
try:
    for file in os.listdir(folder_path):
        if not file.endswith('.epub'):
            continue
        # Rotate to a fresh part file on each part_size boundary.
        if file_counter % part_size == 0:
            if jsonl_file is not None:
                jsonl_file.close()
            part_counter += 1
            jsonl_file = open(output_file_pattern.format(part_counter), 'w', encoding='utf-8')

        full_path = os.path.join(folder_path, file)
        try:
            book = epub.read_epub(full_path)
            text = extract_content_from_epub(book)
            meta = extract_metadata_from_epub(book)
            jsonl_file.write(json.dumps({"text": text, "meta": meta}, ensure_ascii=False) + '\n')
            file_counter += 1
            print(f"reading file {file}")
        except Exception as e:
            # Best-effort: a corrupt/unreadable EPUB is reported and skipped.
            print(f"Error reading file {file}: {e}")
finally:
    # Bug fix: the original tested `'jsonl_file' in locals()` and leaked the
    # open handle if an exception escaped the loop; always close it here.
    if jsonl_file is not None:
        jsonl_file.close()