# -*- coding: utf-8 -*-
"""
Wikipedia URLs Extractor:
Script to download the Wikipedia dataset from Hugging Face, extract URLs,
and save them to a text file for further processing.
Required:
pip install datasets
Using MIN_LENGTH = 1400 results in approximately 1,100,000 URLs.
Using MIN_LENGTH = 1000 results in approximately 1,800,000 URLs.
Author : Guillaume Eckendoerffer
Date : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""
import os
from datasets import load_dataset
# Constants
MIN_LENGTH = 1400  # Minimum article text length, in characters, for its URL to be added to the list.
EXCLUDE_TITLES_START = ['Liste ', 'Abréviations ']  # Title prefixes to skip.
EXCLUDE_TITLES_CONTAIN = ['(homonymie)']  # Disambiguation marker; it ends French titles, so it is matched as a substring rather than a prefix.
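# Illustrative examples (not from the original script) of titles the rules above
# filter out: "Liste des communes de France" (prefix match) and
# "Mercure (homonymie)" (disambiguation substring match).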
# File path configurations
PATH = os.path.dirname(os.path.abspath(__file__))
URL_FILEPATH = os.path.join(PATH, "wiki_link.txt")
# Resetting the output file
with open(URL_FILEPATH, 'w', encoding="utf8") as url_file:
    url_file.write("")

# Loading the dataset
dataset = load_dataset('wikipedia', '20220301.fr')
subset = dataset["train"]

# Collecting URLs: keep the output file open for the whole loop instead of
# reopening it for every matching article.
add = 0
with open(URL_FILEPATH, 'a', encoding="utf8") as url_file:
    for row in subset:
        text = row["text"]
        title = row["title"]
        url = row["url"]

        # Add the URL only if the title is not excluded and the text is long enough
        if (not any(title.startswith(e) for e in EXCLUDE_TITLES_START)
                and not any(s in title for s in EXCLUDE_TITLES_CONTAIN)
                and len(text) >= MIN_LENGTH):
            add += 1
            print(f"{add} : {len(text)} : {url} : {title}")
            url_file.write(f"{url.strip()}\n")