|
"""Filter Common Crawl CSV shards down to rows rich in reasoning-related keywords."""
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob


# Keywords whose presence marks a row as reasoning/science related, grouped by theme.
science_keywords_list = [
    # Modes of inference
    "deductive reasoning", "inductive reasoning", "abductive reasoning",
    "deductive logic", "inductive logic", "probabilistic reasoning",
    "hypothetical reasoning", "falsifiability", "meta-cognition",

    # Argument structure
    "syllogism", "proposition", "premise", "conclusion", "logical fallacy",
    "argument", "logical consistency", "logical operator", "step by step",

    # Thinking styles
    "critical thinking", "analytical skills", "creative thinking",
    "convergent thinking", "divergent thinking", "contextual analysis",
    "pattern recognition", "structured reflection", "reasoned judgment",
    "cognitive load", "counterfactual thinking", "comparative reasoning",
    "subjective reasoning", "objective reasoning", "systematic approach",

    # Scientific method and evidence
    "hypothesis testing", "hypothesis generation", "evidence-based reasoning",
    "empirical reasoning", "evidence synthesis", "confirmation bias",
    "cognitive bias", "causation vs correlation", "construct validity",

    # Decision making
    "problem analysis", "brainstorming", "decision making", "decision fatigue",
    "satisficing", "bounded rationality", "opportunity cost",
    "cost-benefit analysis", "optimization", "strategic planning",
    "trade-off analysis", "prioritization matrix", "value prioritization",

    # Algorithmic problem solving
    "heuristic", "heuristic reasoning", "algorithm", "recursive thinking",
    "pattern matching", "dynamic programming", "systematic approach",

    # Data and causal analysis
    "data analysis", "causal reasoning", "correlation", "probabilistic inference",
    "qualitative analysis", "quantitative analysis", "predictive modeling",
    "belief revision", "mental modeling", "proportional reasoning",

    # Biases and framing
    "cognitive dissonance", "framing effect", "bias mitigation",
    "normative reasoning", "intuitive thinking", "belief bias",

    # Dialogue and argumentation
    "counterargument", "debate", "dialectic", "socratic questioning",
    "disjunctive reasoning", "conjunctive reasoning", "chain of thought",

    # Structured problem-solving techniques
    "root cause analysis", "5 whys", "decision tree", "flow chart",
    "process mapping", "mind mapping", "ideation", "brainwriting",
    "problem decomposition", "value chain analysis",

    # Planning and strategy frameworks
    "SWOT analysis", "gap analysis", "risk assessment", "scenario planning",
    "simulation", "backcasting", "game theory", "decision matrix",
    "opportunity analysis", "knowledge representation",

    # Creative synthesis
    "lateral thinking", "synergistic thinking", "brainstorming",
    "synthesis", "ideation", "hypothetical deduction",

    # Multi-criteria evaluation
    "comparative analysis", "analytical hierarchy process", "multicriteria decision analysis",
    "trade-off analysis", "constraint analysis", "thought experiment",
]

# Build one word-boundary-anchored pattern per keyword. dict.fromkeys() drops the
# handful of duplicate entries (e.g. "brainstorming", "trade-off analysis") while
# preserving order; .replace() undoes the space-escaping that re.escape() applied
# before Python 3.7, so multi-word keywords stay readable.
science_keywords = [
    r'\b' + re.escape(keyword).replace(r'\ ', ' ') + r'\b'
    for keyword in dict.fromkeys(science_keywords_list)
]

# Single alternation that matches any one of the keywords.
science_regex = r'(?:' + '|'.join(science_keywords) + r')'
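
# Illustrative check: used with re.IGNORECASE (as below), the pattern matches
# regardless of case, e.g.
#   re.findall(science_regex, 'Solve it Step By Step.', flags=re.IGNORECASE)
# returns ['Step By Step']; hyphenated variants like 'step-by-step' do not match.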


def process_chunk(chunk):
    """Score one chunk and return only the rows that clear the keyword threshold."""
    # Chunks are read with header=None, so columns arrive as 0, 1, 2.
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Count keyword hits per row in each column. The score column is numeric and
    # contributes nothing, but counting it keeps the logic uniform across columns.
    score_counts = chunk['score'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(science_regex, flags=re.IGNORECASE)

    match_counts = (
        score_counts.fillna(0) + url_counts.fillna(0) + text_counts.fillna(0)
    ).astype(int)

    # Keep rows with at least this many keyword hits in total.
    threshold = 15
    filtered_chunk = chunk[match_counts >= threshold].copy()

    # Overwrite the original score with the keyword-hit count.
    filtered_chunk['score'] = match_counts[match_counts >= threshold]

    return filtered_chunk
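
# Illustrative check with made-up data: a row whose text contains 15 keyword hits
# survives, and its score becomes the hit count.
#   df = pd.DataFrame([[0.5, 'step by step ' * 15, 'http://example.com']])
#   process_chunk(df)['score']  # -> a single row with value 15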


def process_file(input_file, output_file):
    """Stream input_file through process_chunk in parallel, writing survivors to output_file."""
    chunk_size = 10000
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)

    # True until the first write, so the first write truncates and later ones append.
    first_chunk = True

    num_workers = 8
    # Buffer several chunks per worker before dispatching a batch to the pool.
    batch_size = num_workers * 4

    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                for future in tqdm(futures, desc='Processing batch', leave=False):
                    filtered_chunk = future.result()
                    if not filtered_chunk.empty:
                        mode = 'w' if first_chunk else 'a'
                        filtered_chunk.to_csv(output_file, mode=mode, index=False, header=False)
                        first_chunk = False
                chunk_list = []

        # Flush whatever is left after the final partial batch.
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            for future in tqdm(futures, desc='Processing last batch', leave=False):
                filtered_chunk = future.result()
                if not filtered_chunk.empty:
                    mode = 'w' if first_chunk else 'a'
                    filtered_chunk.to_csv(output_file, mode=mode, index=False, header=False)
                    first_chunk = False

    print(f'Finished processing {input_file}')
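
# Example invocation (hypothetical file name under the real data directory):
#   process_file('/media/joe/512-3/csv/CC-MAIN-2019/part-0.csv',
#                '/media/joe/512-3/csv/CC-MAIN-2019/reason_part-0.csv')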


# The __main__ guard is required because ProcessPoolExecutor re-imports this
# module in worker processes on platforms that spawn rather than fork; without
# it, each worker would re-run the directory scan below.
if __name__ == '__main__':
    data_dir = '/media/joe/512-3/csv'
    years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]
    directories = [os.path.join(data_dir, year) for year in years]

    for dir_path in directories:
        if not os.path.isdir(dir_path):
            print(f'Directory not found: {dir_path}')
            continue
        csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
        # Don't re-filter this script's own outputs on a re-run.
        csv_files = [f for f in csv_files if not os.path.basename(f).startswith('reason_')]
        print(f'Found {len(csv_files)} CSV files in {dir_path}')
        for input_file in csv_files:
            base_name = os.path.basename(input_file)
            # Write next to the input, prefixed 'reason_'.
            output_file = os.path.join(dir_path, 'reason_' + base_name)

            # Skip outputs already produced by a previous (possibly interrupted) run.
            if os.path.exists(output_file):
                print(f'Output file already exists. Skipping: {output_file}')
                continue

            process_file(input_file, output_file)