""" |
|
Wikipedia Source Extractor: |
|
|
|
This script retrieves the source code of Wikipedia pages based on URLs found in a text file. |
|
Instead of saving the entire HTML of the page, it trims the content, focusing on the main article |
|
section, thereby limiting the size of each record. |
|
|
|
Required: |
|
pip install aiohttp aiofiles |
|
|
|
Usage: |
|
- Ensure you have a file named "wiki_link.txt" in the same directory as the script. |
|
- The file should contain one Wikipedia URL per line. |
|
- Run the script. |
|
- Extracted content will be saved under the "sources/html_wiki" directory with the name format "{index}.txt". |
|
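
Example "wiki_link.txt" (illustrative URLs, one per line):
    https://fr.wikipedia.org/wiki/Python_(langage)
    https://fr.wikipedia.org/wiki/Paris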

Author     : Guillaume Eckendoerffer
Date       : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""

import asyncio
import os

import aiofiles
import aiohttp

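# Index (0-based) of the first link in "wiki_link.txt" to process; raise it to resume an interrupted run.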
START_INDEX = 0
path = os.path.dirname(os.path.abspath(__file__))


async def fetch_page_content(session, link):
    """Fetches the page content given a URL."""
    try:
        async with session.get(link) as response:
            return await response.text()
    except (aiohttp.ClientError, asyncio.TimeoutError):
        print(f"Error fetching content from {link}")
        return None


def extract_content(source):
    """Extracts the main article section from the full page source."""
    start_idx = source.find('<div id="siteSub"')
    if start_idx == -1:
        return None

    source = source[start_idx:]
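    # French Wikipedia section anchors ("Notes et références" = notes and references,
    # "Articles connexes" = related articles); the article body is cut at the first one found.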
    end_markers = ['id="Notes_et_références"', 'id="Articles_connexes"']
    for marker in end_markers:
        end_idx = source.find(marker)
        if end_idx != -1:
            source = source[:end_idx] + '>'
            break
    return source


async def main():
    """Main async function to process each link."""
    # Ensure the output directory exists before any file is written.
    os.makedirs(os.path.join(path, "sources", "html_wiki"), exist_ok=True)

    async with aiohttp.ClientSession() as session:
        with open(os.path.join(path, "wiki_link.txt"), "r", encoding="utf-8") as f:
            links = f.readlines()

        for i, link in enumerate(links[START_INDEX:], start=START_INDEX + 1):
            print(f"Processing link {i}/{len(links)}")

            html_content = await fetch_page_content(session, link.strip())
            if not html_content:
                continue

            content = extract_content(html_content)
            if not content:
                print(f"Unable to extract content from {link.strip()}")
                continue
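            # Save the trimmed HTML as "<1-based line index>.txt" so each output file maps back to wiki_link.txt.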
            output_file_path = os.path.join(path, "sources", "html_wiki", f"{i}.txt")
            async with aiofiles.open(output_file_path, "w", encoding="utf-8") as out_file:
                await out_file.write(content)


if __name__ == "__main__":
    asyncio.run(main())