# Chinese_Pipeline / main.py
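"""Chinese text processing pipeline built on datatrove.

What the script does, as written below: it reads a web-text parquet dump
(labelled after the CC-MAIN-2024-26 crawl), keeps documents identified as
Chinese, drops documents that contain simplified-only characters (derived from
the TS/ST character mapping files), applies Gopher- and C4-style quality
heuristics, and can optionally run MinHash deduplication followed by PII
formatting. A small analysis step reports which simplified characters
triggered removals.
"""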
# Standard library imports
import gzip
import json
import logging
import os
import re
import shutil
import subprocess
from pathlib import Path
from typing import Set, Tuple

# Third-party imports
import jieba
import spacy
from datatrove.executor.local import LocalPipelineExecutor
from datatrove.pipeline.filters import (
    RegexFilter,
    LanguageFilter,
    GopherQualityFilter,
    C4QualityFilter,
)
from datatrove.pipeline.dedup import (
    MinhashDedupCluster,
    MinhashDedupFilter,
    MinhashDedupSignature,
    MinhashConfig,
    MinhashDedupBuckets,
)
from datatrove.pipeline.writers.jsonl import JsonlWriter
from datatrove.pipeline.writers.parquet import ParquetWriter
from datatrove.pipeline.readers import ParquetReader
from datatrove.pipeline.tokens import TokensCounter
from datatrove.pipeline.formatters import PIIFormatter
# Constants
MAIN_OUTPUT_PATH = "./output"
FILTERING_OUTPUT_PATH = f"{MAIN_OUTPUT_PATH}/base_processing"
DUMP = "CC-MAIN-2024-26"
S3_MINHASH_BASE_PATH = f"{MAIN_OUTPUT_PATH}/minhash"
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Configuration
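# A minimal reading of these settings: 14 buckets x 8 hashes per bucket gives
# 112 MinHash values per document, computed over 2-gram shingles. Documents
# whose 8 hashes collide in any single bucket become candidate near-duplicates
# in the bucketing/clustering stages defined further below.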
minhash_config = MinhashConfig(
    num_buckets=14,
    hashes_per_bucket=8,
    n_grams=2,
)
# Chinese stop words
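# Traditional-character function words. GopherQualityFilter below only requires
# that at least one of them appears in a document (min_stop_words=1).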
chinese_stop_words = [
"的", "了", "和", "是", "就", "都", "而", "及", "與", "這", "其", "但", "並", "個", "我",
"你", "他", "她", "它", "們", "我們", "你們", "他們", "她們", "它們", "在", "有", "人",
"這個", "那個", "如果", "因為", "所以", "可以", "沒有", "很", "非常", "得", "著", "過", "為", "再",
"吧", "呢", "啊", "哪", "那", "麼", "什麼", "誰", "哪裡", "哪裡", "怎麼", "怎麼樣", "為什麼", "將"
]

def clear_previous_outputs():
    directories_to_clear = [
        f"{MAIN_OUTPUT_PATH}/base_processing",
        f"{MAIN_OUTPUT_PATH}/filtered_output",
        f"{S3_MINHASH_BASE_PATH}/{DUMP}/signatures",
        f"{S3_MINHASH_BASE_PATH}/{DUMP}/buckets",
        f"{S3_MINHASH_BASE_PATH}/{DUMP}/remove_ids",
        f"{S3_MINHASH_BASE_PATH}/{DUMP}/deduped_output",
        f"{MAIN_OUTPUT_PATH}/logs",
    ]
    for directory in directories_to_clear:
        if os.path.exists(directory):
            print(f"Removing: {directory}")
            shutil.rmtree(directory)
        os.makedirs(directory, exist_ok=True)
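
# The mapping files below are assumed to follow an OpenCC-style dictionary
# format, one entry per line: "<source_char>\t<mapping1> <mapping2> ...".
# Lines that do not split into at least two tab-separated fields are skipped.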

def read_character_mappings(file_path: str) -> Tuple[Set[str], Set[str]]:
    traditional = set()
    simplified = set()
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            chars = line.strip().split('\t')
            if len(chars) >= 2:
                trad, simps = chars[0], chars[1].split()
                traditional.add(trad)
                simplified.update(simps)
    return traditional, simplified

def read_traditional_characters(file_path: str) -> Set[str]:
    with open(file_path, 'r', encoding='utf-8') as f:
        return set(f.read().strip())

def create_simplified_filter(simplified_only: Set[str]) -> RegexFilter:
    simplified_pattern = '|'.join(re.escape(char) for char in simplified_only)
    return RegexFilter(
        regex_exp=simplified_pattern,
        exclusion_writer=JsonlWriter(
            f"{FILTERING_OUTPUT_PATH}/removed/simplified/{DUMP}",
            output_filename="${rank}.jsonl.gz"
        )
    )
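
# The initial filtering runs as three chained executors, each reading the
# previous stage's parquet output:
#   part 1: language identification (keep "zh" at >= 0.65 confidence) plus the
#           simplified-character RegexFilter built above
#   part 2: Gopher quality heuristics tuned for Chinese (short average word
#           length, stop-word check against chinese_stop_words)
#   part 3: C4-style heuristics (javascript / curly-bracket / policy lines),
#           writing surviving documents to ./output/filtered_output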

def initial_filtering_pipeline(simplified_filter: RegexFilter) -> Tuple[LocalPipelineExecutor, LocalPipelineExecutor, LocalPipelineExecutor]:
    # Initial filtering pipeline - Part 1
    initial_executor_part1 = LocalPipelineExecutor(
        pipeline=[
            ParquetReader("./input/hugg-init/out-1720278098.parquet"),
            LanguageFilter(languages=["zh"], language_threshold=0.65),
            simplified_filter,
            ParquetWriter(f"{MAIN_OUTPUT_PATH}/temp_output_part1")
        ],
        tasks=8,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/base_processing/{DUMP}/part1",
    )
    # Initial filtering pipeline - Part 2
    initial_executor_part2 = LocalPipelineExecutor(
        pipeline=[
            ParquetReader(f"{MAIN_OUTPUT_PATH}/temp_output_part1"),
            GopherQualityFilter(
                min_doc_words=15,
                max_doc_words=10000,
                min_avg_word_length=1,
                max_avg_word_length=4,
                max_symbol_word_ratio=0.1,
                max_bullet_lines_ratio=0.9,
                max_ellipsis_lines_ratio=0.3,
                min_stop_words=1,
                stop_words=chinese_stop_words,
                exclusion_writer=JsonlWriter(f"{FILTERING_OUTPUT_PATH}/removed/4_gopher_qual/{DUMP}"),
                language="zh",
            ),
            ParquetWriter(f"{MAIN_OUTPUT_PATH}/temp_output_part2")
        ],
        tasks=8,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/base_processing/{DUMP}/part2",
    )
    # Initial filtering pipeline - Part 3
    initial_executor_part3 = LocalPipelineExecutor(
        pipeline=[
            ParquetReader(f"{MAIN_OUTPUT_PATH}/temp_output_part2"),
            C4QualityFilter(
                split_paragraph=False,
                remove_citations=False,
                filter_no_terminal_punct=False,
                min_num_sentences=-1,
                min_words_per_line=-1,
                max_word_length=-1,
                filter_lorem_ipsum=False,
                filter_javascript=True,
                filter_curly_bracket=True,
                filter_policy=True,
                language="zh",
                exclusion_writer=JsonlWriter(f"{FILTERING_OUTPUT_PATH}/removed/5_c4/{DUMP}"),
            ),
            ParquetWriter(f"{MAIN_OUTPUT_PATH}/filtered_output")
        ],
        tasks=8,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/base_processing/{DUMP}/part3",
    )
    return initial_executor_part1, initial_executor_part2, initial_executor_part3
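
# MinHash deduplication in four stages: per-document signatures, bucketed hash
# matching, duplicate clustering, and finally filtering the identified
# duplicates out of the filtered corpus (with token counting and PII formatting
# applied on the way out).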

def deduplication_pipeline() -> Tuple[LocalPipelineExecutor, LocalPipelineExecutor, LocalPipelineExecutor, LocalPipelineExecutor]:
    stage1 = LocalPipelineExecutor(
        pipeline=[
            ParquetReader(f"{MAIN_OUTPUT_PATH}/filtered_output"),
            MinhashDedupSignature(
                output_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/signatures",
                config=minhash_config,
                language="zh",
            ),
        ],
        tasks=8,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/minhash/signatures",
    )
    stage2 = LocalPipelineExecutor(
        pipeline=[
            MinhashDedupBuckets(
                input_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/signatures",
                output_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/buckets",
                config=minhash_config,
            ),
        ],
        tasks=minhash_config.num_buckets,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/minhash/buckets",
    )
    stage3 = LocalPipelineExecutor(
        pipeline=[
            MinhashDedupCluster(
                input_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/buckets",
                output_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/remove_ids",
                config=minhash_config,
            ),
        ],
        tasks=1,
        workers=1,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/minhash/clustering",
    )
    stage4 = LocalPipelineExecutor(
        pipeline=[
            ParquetReader(f"{MAIN_OUTPUT_PATH}/filtered_output"),
            TokensCounter(),
            MinhashDedupFilter(input_folder=f"{S3_MINHASH_BASE_PATH}/{DUMP}/remove_ids"),
            PIIFormatter(),
            ParquetWriter(f"{S3_MINHASH_BASE_PATH}/{DUMP}/deduped_output"),
        ],
        tasks=8,
        workers=8,
        logging_dir=f"{MAIN_OUTPUT_PATH}/logs/minhash/filtering",
    )
    return stage1, stage2, stage3, stage4
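
# Post-hoc check on the simplified-character filter: re-open the gzipped JSONL
# exclusion output and record, per removed document, which simplified-only
# characters actually matched.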

def analyze_removed_documents(removed_dir: str, simplified_pattern: str, output_file: str):
    removed_files = Path(removed_dir).glob('*.jsonl.gz')
    simplified_regex = re.compile(f'[{simplified_pattern}]')
    total_documents = 0
    documents_with_simplified = 0
    with gzip.open(output_file, 'wt', encoding='utf-8') as out_f:
        for file in removed_files:
            logger.info(f"Analyzing file: {file.name}")
            with gzip.open(file, 'rt', encoding='utf-8') as in_f:
                for line in in_f:
                    total_documents += 1
                    document = json.loads(line)
                    text = document.get('text', '')
                    matches = list(set(simplified_regex.findall(text)))
                    if matches:
                        documents_with_simplified += 1
                        result = {
                            "document_number": total_documents,
                            "simplified_characters": matches,
                            "text_snippet": text
                        }
                        out_f.write(json.dumps(result, ensure_ascii=False) + '\n')
    logger.info(f"Analysis complete. Results written to {output_file}")
    logger.info(f"Total documents analyzed: {total_documents}")
    logger.info(f"Documents with simplified characters: {documents_with_simplified}")

def main(restart=False):
    if restart:
        clear_previous_outputs()
    # Read character mappings
    ts_traditional, ts_simplified = read_character_mappings("./input/ts-map/TSCharacters.txt")
    st_traditional, st_simplified = read_character_mappings("./input/ts-map/STCharacters.txt")
    # Combine sets
    traditional = ts_traditional.union(st_traditional)
    simplified = ts_simplified.union(st_simplified)
    # Additional processing
    white_list = set("床峰群秘霉庄痴雇简体踪")
    additional_set = read_traditional_characters("./input/dict-tradi2/trad.txt")
    print(f"Number of additional traditional characters: {len(additional_set)}")
    simplified_only = simplified - traditional - white_list
    # Create simplified filter
    simplified_filter = create_simplified_filter(simplified_only)
    # Run initial filtering pipeline
    initial_executor_part1, initial_executor_part2, initial_executor_part3 = initial_filtering_pipeline(simplified_filter)
    initial_executor_part1.run()
    initial_executor_part2.run()
    initial_executor_part3.run()
    logger.info("Initial filtering complete. Starting analysis of removed documents.")
    removed_dir = f"{FILTERING_OUTPUT_PATH}/removed/simplified/{DUMP}"
    output_file = f"{MAIN_OUTPUT_PATH}/simplified_analysis_results.jsonl.gz"
    # Create the simplified pattern
    simplified_pattern = ''.join(simplified_only)
    analyze_removed_documents(removed_dir, simplified_pattern, output_file)
    # Run deduplication pipeline
    # stage1, stage2, stage3, stage4 = deduplication_pipeline()
    # stage1.run()
    # stage2.run()
    # stage3.run()
    # stage4.run()

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Run Chinese text processing pipeline")
    parser.add_argument("--restart", action="store_true", help="Clear previous outputs and restart processing")
    args = parser.parse_args()
    main(restart=args.restart)
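
# Usage sketch (input/output paths above are relative to the working directory):
#   python main.py            # run filtering + analysis, reusing existing outputs
#   python main.py --restart  # clear previous output directories first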