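"""Filter headerless (score, text, url) CSV files for reasoning-related keywords.

Each row's three columns are scanned for the keyword phrases listed below; rows
whose combined match count meets a threshold are written to a 'reason_'-prefixed
CSV next to the input file, with the keyword count replacing the original score.
"""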
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob
# Science keywords (formatted for regex word boundaries)
science_keywords_list = [
    # Core Types of Reasoning
    "deductive reasoning", "inductive reasoning", "abductive reasoning",
    "deductive logic", "inductive logic", "probabilistic reasoning",
    "hypothetical reasoning", "falsifiability", "meta-cognition",
    # Logic Structures and Components
    "syllogism", "proposition", "premise", "conclusion", "logical fallacy",
    "argument", "logical consistency", "logical operator", "step by step",
    # Analytical and Critical Thinking
    "critical thinking", "analytical skills", "creative thinking",
    "convergent thinking", "divergent thinking", "contextual analysis",
    "pattern recognition", "structured reflection", "reasoned judgment",
    "cognitive load", "counterfactual thinking", "comparative reasoning",
    "subjective reasoning", "objective reasoning", "systematic approach",
    # Hypothesis and Evidence Analysis
    "hypothesis testing", "hypothesis generation", "evidence-based reasoning",
    "empirical reasoning", "evidence synthesis", "confirmation bias",
    "cognitive bias", "causation vs correlation", "construct validity",
    # Problem Solving and Decision Making
    "problem analysis", "brainstorming", "decision making", "decision fatigue",
    "satisficing", "bounded rationality", "opportunity cost",
    "cost-benefit analysis", "optimization", "strategic planning",
    "trade-off analysis", "prioritization matrix", "value prioritization",
    # Heuristics and Algorithms
    "heuristic", "heuristic reasoning", "algorithm", "recursive thinking",
    "pattern matching", "dynamic programming", "systematic approach",
    # Data Analysis and Modeling
    "data analysis", "causal reasoning", "correlation", "probabilistic inference",
    "qualitative analysis", "quantitative analysis", "predictive modeling",
    "belief revision", "mental modeling", "proportional reasoning",
    # Cognitive Processes and Biases
    "cognitive dissonance", "framing effect", "bias mitigation",
    "normative reasoning", "intuitive thinking", "belief bias",
    # Argumentation and Discourse
    "counterargument", "debate", "dialectic", "socratic questioning",
    "disjunctive reasoning", "conjunctive reasoning", "chain of thought",
    # Problem Decomposition and Structuring
    "root cause analysis", "5 whys", "decision tree", "flow chart",
    "process mapping", "mind mapping", "ideation", "brainwriting",
    "problem decomposition", "value chain analysis",
    # Analytical Frameworks and Techniques
    "SWOT analysis", "gap analysis", "risk assessment", "scenario planning",
    "simulation", "backcasting", "game theory", "decision matrix",
    "opportunity analysis", "knowledge representation",
    # Creative Thinking and Synthesis
    "lateral thinking", "synergistic thinking", "brainstorming",
    "synthesis", "ideation", "hypothetical deduction",
    # Additional Analytical Techniques
    "comparative analysis", "analytical hierarchy process", "multicriteria decision analysis",
    "trade-off analysis", "constraint analysis", "thought experiment",
]
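# Note: a few phrases appear twice above (e.g. "brainstorming", "ideation");
# duplicate alternatives make the regex slightly redundant but do not
# double-count matches.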
# Escape special regex characters and add word boundaries
science_keywords = [
r"\b" + re.escape(keyword).replace(r'\ ', ' ') + r"\b" for keyword in science_keywords_list
]
# Combine science keywords into a single regex pattern using non-capturing groups
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'
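# Example: "step by step" becomes the pattern r"\bstep by step\b" (the .replace()
# above undoes the escaped spaces that older re.escape versions emit), so only
# whole-phrase matches are counted; case is handled via re.IGNORECASE at match time.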
# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Assign column names if they are not already set
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']
    # Use vectorized string operations for efficiency
    # Count the number of matches in each column
    score_counts = chunk['score'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    # Handle NaN values by filling them with zero
    score_counts = score_counts.fillna(0)
    url_counts = url_counts.fillna(0)
    text_counts = text_counts.fillna(0)
    # Sum the counts to get the science score
    match_counts = score_counts + url_counts + text_counts
    match_counts = match_counts.astype(int)

    # Set a threshold for the minimum science score
    threshold = 15  # Adjust this value as needed

    # Filter rows that meet the threshold
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['science_score'] = match_counts[match_counts >= threshold]
    # Replace the original 'score' with 'science_score'
    filtered_chunk['score'] = filtered_chunk['science_score']
    filtered_chunk = filtered_chunk.drop(columns=['science_score'])
    return filtered_chunk
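# Note: process_chunk must be a module-level function so ProcessPoolExecutor can
# pickle it by reference; science_regex is likewise module-level, so worker
# processes see it whether they fork or re-import this module.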
# Function to process a single CSV file
def process_file(input_file, output_file):
    # Read the CSV file in chunks, assuming no header in the CSV file
    chunk_size = 10000  # Adjust this value based on your memory constraints
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)
    # Prepare the output file
    first_chunk = True
    # Number of worker processes
    num_workers = 8  # Adjust based on your CPU cores
    # Batch size for chunks to process in parallel
    batch_size = num_workers * 4  # Adjust based on memory constraints
    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                # Process batch of chunks in parallel
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                for future in tqdm(futures, desc='Processing batch', leave=False):
                    filtered_chunk = future.result()
                    if not filtered_chunk.empty:
                        if first_chunk:
                            filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
                            first_chunk = False
                        else:
                            filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
                chunk_list = []
        # Process any remaining chunks
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            for future in tqdm(futures, desc='Processing last batch', leave=False):
                filtered_chunk = future.result()
                if not filtered_chunk.empty:
                    if first_chunk:
                        filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
                        first_chunk = False
                    else:
                        filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
    print(f'Finished processing {input_file}')
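# A single file can also be processed directly; the paths here are hypothetical:
#     process_file('/media/joe/512-3/csv/CC-MAIN-2020/example.csv',
#                  '/media/joe/512-3/csv/CC-MAIN-2020/reason_example.csv')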
# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)] # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]
# Process each CSV file in each directory. The __main__ guard keeps this driver
# loop from re-running if the module is imported by spawn-based multiprocessing
# worker processes.
if __name__ == '__main__':
    for dir_path in directories:
        if not os.path.isdir(dir_path):
            print(f'Directory not found: {dir_path}')
            continue
        csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
        print(f'Found {len(csv_files)} CSV files in {dir_path}')
        for input_file in csv_files:
            # Construct output file name
            base_name = os.path.basename(input_file)
            output_file = os.path.join(dir_path, 'reason_' + base_name)
            # Check if output file already exists to avoid reprocessing
            if os.path.exists(output_file):
                print(f'Output file already exists. Skipping: {output_file}')
                continue
            process_file(input_file, output_file)