# Par-Four-Fineweb-Edu-Fortified / par-4-forti-four.py
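"""Filter airtrain-ai/fineweb-edu-fortified down to high-scoring entries.

Streams each Common Crawl dump (config) listed in dataset_folders, keeps
entries with score >= score_threshold, and writes per-folder outputs: a text
file wrapping each entry in <s>...</s> tokens and a CSV of (score, text, url).
Folders are processed in parallel, and per-folder progress logs allow an
interrupted run to resume where it left off.
"""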
import csv
import os
import signal
import sys
import time
from functools import partial
from multiprocessing import Pool, Manager

import requests
from datasets import load_dataset
# Define parameters
score_threshold = 4
error_log_file = "error_log.txt"
# List of dataset folders to process
dataset_folders = [
"CC-MAIN-2013-20", "CC-MAIN-2013-48", "CC-MAIN-2014-10", "CC-MAIN-2014-15",
"CC-MAIN-2014-23", "CC-MAIN-2014-35", "CC-MAIN-2014-41", "CC-MAIN-2014-42",
"CC-MAIN-2014-49", "CC-MAIN-2014-52", "CC-MAIN-2015-06", "CC-MAIN-2015-11",
"CC-MAIN-2015-14", "CC-MAIN-2015-18", "CC-MAIN-2015-22", "CC-MAIN-2015-27",
"CC-MAIN-2015-32", "CC-MAIN-2015-35", "CC-MAIN-2015-40", "CC-MAIN-2015-48",
"CC-MAIN-2016-07", "CC-MAIN-2016-18", "CC-MAIN-2016-22", "CC-MAIN-2016-26",
"CC-MAIN-2016-30", "CC-MAIN-2016-36", "CC-MAIN-2016-40", "CC-MAIN-2016-44",
"CC-MAIN-2016-50", "CC-MAIN-2017-04", "CC-MAIN-2017-09", "CC-MAIN-2017-13",
"CC-MAIN-2017-17", "CC-MAIN-2017-22", "CC-MAIN-2017-26", "CC-MAIN-2017-30",
"CC-MAIN-2017-34", "CC-MAIN-2017-39", "CC-MAIN-2017-43", "CC-MAIN-2017-47",
"CC-MAIN-2017-51", "CC-MAIN-2018-05", "CC-MAIN-2018-09", "CC-MAIN-2018-13",
"CC-MAIN-2018-17", "CC-MAIN-2018-22", "CC-MAIN-2018-26", "CC-MAIN-2018-30",
"CC-MAIN-2018-34", "CC-MAIN-2018-39", "CC-MAIN-2018-43", "CC-MAIN-2018-47",
"CC-MAIN-2018-51", "CC-MAIN-2019-04", "CC-MAIN-2019-09", "CC-MAIN-2019-13",
"CC-MAIN-2019-18", "CC-MAIN-2019-22", "CC-MAIN-2019-26", "CC-MAIN-2019-30",
"CC-MAIN-2019-35", "CC-MAIN-2019-39", "CC-MAIN-2019-43", "CC-MAIN-2019-47",
"CC-MAIN-2019-51", "CC-MAIN-2020-05", "CC-MAIN-2020-10", "CC-MAIN-2020-16",
"CC-MAIN-2020-24", "CC-MAIN-2020-29", "CC-MAIN-2020-34", "CC-MAIN-2020-40",
"CC-MAIN-2020-45", "CC-MAIN-2020-50", "CC-MAIN-2021-04", "CC-MAIN-2021-10",
"CC-MAIN-2021-17", "CC-MAIN-2021-21", "CC-MAIN-2021-25", "CC-MAIN-2021-31",
"CC-MAIN-2021-39", "CC-MAIN-2021-43", "CC-MAIN-2021-49", "CC-MAIN-2022-05",
"CC-MAIN-2022-21", "CC-MAIN-2022-27", "CC-MAIN-2022-33", "CC-MAIN-2022-40",
"CC-MAIN-2022-49", "CC-MAIN-2023-06", "CC-MAIN-2023-14", "CC-MAIN-2023-23",
"CC-MAIN-2023-40", "CC-MAIN-2023-50", "CC-MAIN-2024-10"
]
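# Each folder name above is a config of airtrain-ai/fineweb-edu-fortified,
# corresponding to one Common Crawl dump.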
# Shared flag used to signal workers to stop. As a Manager proxy it is visible
# to pool workers on fork-based platforms; under spawn, each worker re-imports
# this module and gets its own manager, so the flag is only advisory there.
interrupt_flag = Manager().Value('i', False)
# Function to log errors
def log_error(error_message):
    with open(error_log_file, "a") as error_log:
        error_log.write(f"{error_message}\n")
# Retry mechanism to handle connection errors when loading the dataset
def retry_request(load_dataset_function, max_retries=5, wait_time=5):
    retries = 0
    while retries < max_retries:
        try:
            return load_dataset_function()
        except requests.exceptions.ConnectionError as e:
            log_error(f"Connection error: {e}. Retrying in {wait_time} seconds...")
        except Exception as e:
            log_error(f"Unexpected error: {e}. Retrying in {wait_time} seconds...")
        retries += 1
        time.sleep(wait_time)
    log_error("Max retries exceeded.")
    return None
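# Example (not executed): any zero-argument callable works; process_folder
# below wraps a streaming load of one config the same way:
#
#   ds = retry_request(lambda: load_dataset(
#       "airtrain-ai/fineweb-edu-fortified", "CC-MAIN-2024-10",
#       split="train", streaming=True))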
# Function to save the text column to a file with start and stop tokens
def save_text_column(entry, output_text_file):
    try:
        text = entry["text"]
        with open(output_text_file, "a", encoding='utf-8') as f:
            f.write(f"<s>\n{text}</s>\n")
    except KeyError as e:
        log_error(f"Missing 'text' field: {e}")
# Function to save score, text, and URL to a CSV file, with error handling
def save_to_csv(entry, output_csv_file, write_header=False):
    try:
        with open(output_csv_file, mode='a', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            if write_header:
                writer.writerow(["score", "text", "url"])  # CSV headers
            score = entry["score"]
            text = entry["text"]
            url = entry.get("url", "N/A")  # 'url' may be absent in some rows
            writer.writerow([score, text, url])
    except KeyError as e:
        log_error(f"Missing field in entry: {e}")
# Graceful exit handling
def signal_handler(sig, frame):
    print("Interrupt received, saving progress and exiting...")
    interrupt_flag.value = True  # Set the flag to stop processing
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
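# Note: this handler is installed in the parent process; pool workers inherit
# it on fork-based platforms, and the KeyboardInterrupt handling inside
# process_all_folders_parallel covers Ctrl+C during pool.map.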
# Function to process a single folder
def process_folder(folder, score_threshold):
    # Define per-folder log file
    log_file = f"processing_log_{folder}.txt"

    # Function to log progress to a file
    def log_progress(last_id):
        with open(log_file, "w") as log:
            log.write(f"{last_id}")

    # Function to resume from a specific point by reading the log file
    def resume_progress():
        if os.path.exists(log_file):
            with open(log_file, "r") as log:
                last_id = log.read().strip()
            if last_id == 'None' or last_id == '':
                last_id = None
            return last_id
        return None
print(f"Processing dataset folder: {folder}")
# Define per-folder output files
output_text_file = f"forti-sampled_text_dataset_{folder}.txt"
output_csv_file = f"forti-sampled_dataset_{folder}.csv"
# Load dataset with retry mechanism
dataset = retry_request(lambda: load_dataset(
"airtrain-ai/fineweb-edu-fortified",
folder,
split="train",
streaming=True
))
if not dataset:
log_error(f"Failed to load dataset {folder}. Skipping.")
return
# Retrieve last processed ID for resuming
last_processed_id = resume_progress()
# Initialize variables
found_last_id = last_processed_id is None
processed_entries = 0 # Reset processed_entries for the new folder
    # Process entries
    while True:
        try:
            for entry in dataset:
                if interrupt_flag.value:
                    break  # Exit loop if interrupt flag is set
                # Skip entries until we reach the last processed ID
                entry_id = entry.get('id')
                if not found_last_id:
                    if entry_id == last_processed_id:
                        found_last_id = True
                    continue
                # Update last_processed_id
                last_processed_id = entry_id
                # Check if entry meets the score threshold
                if entry.get('score', 0) >= score_threshold:
                    # Write CSV headers only for a brand-new file, so a
                    # resumed run does not repeat the header row mid-file
                    write_header = (processed_entries == 0
                                    and not os.path.exists(output_csv_file))
                    save_to_csv(entry, output_csv_file, write_header=write_header)
                    save_text_column(entry, output_text_file)
                    processed_entries += 1
                    if processed_entries % 100 == 0:
                        log_progress(last_processed_id)
                        print(f"Processed {processed_entries} entries from {folder}...")  # Terminal output
            break  # Exit while loop when dataset iteration is complete
        except requests.exceptions.ConnectionError as e:
            # Handle connection error during iteration
            log_error(f"Connection error during iteration in {folder}: {e}")
            print(f"Connection error during iteration in {folder}: {e}. Retrying in 5 seconds...")
            time.sleep(5)
            # Re-initialize the stream; iteration restarts from the beginning,
            # so entries are skipped again up to last_processed_id
            dataset = retry_request(lambda: load_dataset(
                "airtrain-ai/fineweb-edu-fortified",
                folder,
                split="train",
                streaming=True
            ))
            if not dataset:
                log_error(f"Failed to reload dataset {folder} after connection error. Skipping.")
                break
            # Skip entries up to last_processed_id, unless nothing was processed yet
            found_last_id = last_processed_id is None
        except Exception as e:
            log_error(f"Error during processing in {folder}: {e}")
            print(f"Error during processing in {folder}: {e}. Restarting iteration.")
            # The stream restarts from the beginning on the next pass, so
            # reset the skip flag to avoid re-processing earlier entries
            found_last_id = last_processed_id is None
            continue

    # After processing all entries in the folder, log progress
    log_progress(last_processed_id)
    print(f"Completed processing folder: {folder}")
    if interrupt_flag.value:
        print(f"Processing interrupted in folder: {folder}")
# Main process function to process multiple folders in parallel
def process_all_folders_parallel(dataset_folders, score_threshold):
    # Use a multiprocessing Pool to process folders in parallel
    with Pool(processes=os.cpu_count()) as pool:
        try:
            # Partial function to fix the score_threshold parameter
            func = partial(process_folder, score_threshold=score_threshold)
            pool.map(func, dataset_folders)
        except KeyboardInterrupt:
            print("KeyboardInterrupt received, terminating pool...")
            pool.terminate()
            pool.join()
            print("Pool terminated.")
            interrupt_flag.value = True
    print("Processing complete.")
# Start processing all folders in parallel; the __main__ guard keeps
# multiprocessing workers from re-running this block when they import
# the module on spawn-based platforms
if __name__ == "__main__":
    process_all_folders_parallel(dataset_folders, score_threshold)
    print("Filtered datasets saved to individual files per folder.")