Josephgflowers committed
Commit 68aa86a
1 Parent(s): dd65df2
Upload find-math-fine.py

find-math-fine.py  ADDED  (+200 -0)
@@ -0,0 +1,200 @@
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob

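# Dependencies: pandas and tqdm are third-party packages
# (e.g. pip install pandas tqdm); everything else is standard library.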
8 |
+
# Science keywords (formatted for regex word boundaries)
|
9 |
+
science_keywords_list = [
|
10 |
+
|
11 |
+
#logic_reasoning_problem_solving_keywords
|
12 |
+
"deductive reasoning", "inductive reasoning", "abductive reasoning", "logical fallacy",
|
13 |
+
"syllogism", "proposition", "premise", "conclusion",
|
14 |
+
"argument", "critical thinking", "analytical skills", "hypothesis testing",
|
15 |
+
"problem analysis", "brainstorming", "decision making", "creative thinking",
|
16 |
+
"heuristic", "algorithm", "data analysis", "causal reasoning",
|
17 |
+
"correlation", "evidence-based reasoning", "validity", "soundness",
|
18 |
+
"cognitive bias", "confirmation bias", "cognitive dissonance", "logical consistency",
|
19 |
+
"counterargument", "debate", "dialectic", "socratic questioning",
|
20 |
+
"root cause analysis", "SWOT analysis", "decision tree", "flow chart",
|
21 |
+
"mind mapping", "ideation", "brainwriting", "lateral thinking",
|
22 |
+
"problem decomposition", "synthesis", "pattern recognition", "inference",
|
23 |
+
"troubleshooting", "risk assessment", "scenario planning", "cost-benefit analysis",
|
24 |
+
"optimization", "simulation", "strategic planning", "logical operator", "chain of thought",
|
25 |
+
"step by step",
|
26 |
+
|
27 |
+
# Basic arithmetic
|
28 |
+
"addition", "subtraction", "multiplication", "division", "fraction",
|
29 |
+
"decimal", "percentage", "ratio", "proportion", "absolute value",
|
30 |
+
|
31 |
+
# Algebra
|
32 |
+
"algebra", "equation", "coefficient", "variable", "polynomial",
|
33 |
+
"quadratic", "exponential", "logarithm", "factorial", "sum",
|
34 |
+
"quotient",
|
35 |
+
"linear equation", "nonlinear equation", "system of equations",
|
36 |
+
|
37 |
+
# Data analysis and statistics
|
38 |
+
"data", "statistics", "analysis", "median",
|
39 |
+
"standard deviation", "probability",
|
40 |
+
"binomial", "normal distribution", "histogram",
|
41 |
+
"scatter plot", "correlation", "regression",
|
42 |
+
|
43 |
+
# Functions and calculus
|
44 |
+
"function", "linear function", "nonlinear function",
|
45 |
+
"intercept", "coordinate", "domain", "range", "limit",
|
46 |
+
"derivative", "integral", "differentiation",
|
47 |
+
"infinite series", "sequence", "convergence", "power series",
|
48 |
+
|
49 |
+
# Geometry and trigonometry
|
50 |
+
"geometry", "angle", "triangle", "rectangle", "square",
|
51 |
+
"circle", "polygon", "perimeter", "area", "circumference",
|
52 |
+
"diameter", "radius", "pythagorean theorem", "trigonometry",
|
53 |
+
"sine", "cosine", "tangent", "secant", "cosecant",
|
54 |
+
"cotangent", "arc", "parallelogram",
|
55 |
+
"rhombus", "trapezoid", "congruence",
|
56 |
+
|
57 |
+
# Advanced mathematics
|
58 |
+
"calculus", "differential equation", "partial derivative",
|
59 |
+
"vector", "matrix", "determinant", "eigenvalue", "eigenvector",
|
60 |
+
"linear transformation", "tensor", "multivariable calculus",
|
61 |
+
|
62 |
+
# Logic and set theory
|
63 |
+
"logic", "subset", "union", "intersection",
|
64 |
+
"element", "cardinality", "venn diagram", "truth table",
|
65 |
+
"proposition", "theorem", "proof", "induction",
|
66 |
+
|
67 |
+
# Discrete math and combinatorics
|
68 |
+
"combinatorics", "permutation", "combination", "graph theory",
|
69 |
+
"tree", "vertex",
|
70 |
+
"probability", "random variable", "expected value",
|
71 |
+
|
72 |
+
# Measurements and units
|
73 |
+
"computation", "measurement", "length", "width", "height",
|
74 |
+
"area", "volume", "density", "mass",
|
75 |
+
"velocity", "acceleration", "force",
|
76 |
+
|
77 |
+
# Additional concepts
|
78 |
+
"binary", "decimal", "octal", "hexadecimal", "modulus",
|
79 |
+
"prime number", "composite number", "greatest common divisor",
|
80 |
+
"least common multiple", "factorization", "irrational number",
|
81 |
+
"complex number", "imaginary unit", "real number",
|
82 |
+
"absolute value"
|
83 |
+
]
|
84 |
+
|
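# Note: a few keywords (e.g. "correlation", "probability", "area", "decimal",
# "proposition", "absolute value") appear in more than one category above.
# Duplicate alternatives in a regex alternation are harmless: each position
# in the scanned text is still counted at most once.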
# Escape special regex characters and add word boundaries
science_keywords = [
    r"\b" + re.escape(keyword).replace(r'\ ', ' ') + r"\b" for keyword in science_keywords_list
]

# Combine science keywords into a single regex pattern using non-capturing groups
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'
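# The .replace(r'\ ', ' ') restores plain spaces in multi-word keywords on
# Python < 3.7, where re.escape() escaped all non-alphanumerics (including
# spaces); on 3.7+ it is a no-op. The combined pattern has the shape:
#   (?:\bdeductive reasoning\b|\binductive reasoning\b|...)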

# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Assign column names if they are not already set
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Use vectorized string operations for efficiency:
    # count the number of matches in each column
    score_counts = chunk['score'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(science_regex, flags=re.IGNORECASE)

    # Handle NaN values by filling them with zero
    score_counts = score_counts.fillna(0)
    url_counts = url_counts.fillna(0)
    text_counts = text_counts.fillna(0)

    # Sum the counts to get the science score
    match_counts = score_counts + url_counts + text_counts
    match_counts = match_counts.astype(int)

    # Set a threshold for the minimum science score
    threshold = 50  # Adjust this value as needed

    # Filter rows that meet the threshold
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['science_score'] = match_counts[match_counts >= threshold]

    # Replace the original 'score' with 'science_score'
    filtered_chunk['score'] = filtered_chunk['science_score']
    filtered_chunk = filtered_chunk.drop(columns=['science_score'])

    return filtered_chunk

+
# Function to process a single CSV file
|
132 |
+
def process_file(input_file, output_file):
|
133 |
+
# Read the CSV file in chunks, assuming no header in the CSV file
|
134 |
+
chunk_size = 10000 # Adjust this value based on your memory constraints
|
135 |
+
reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)
|
136 |
+
|
137 |
+
# Prepare the output file
|
138 |
+
first_chunk = True
|
139 |
+
|
140 |
+
# Number of worker processes
|
141 |
+
num_workers = 20 # Adjust based on your CPU cores
|
142 |
+
|
143 |
+
# Batch size for chunks to process in parallel
|
144 |
+
batch_size = num_workers * 4 # Adjust based on memory constraints
|
145 |
+
|
146 |
+
chunk_list = []
|
147 |
+
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
148 |
+
for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
|
149 |
+
chunk_list.append(chunk)
|
150 |
+
if len(chunk_list) == batch_size:
|
151 |
+
# Process batch of chunks in parallel
|
152 |
+
futures = [executor.submit(process_chunk, c) for c in chunk_list]
|
153 |
+
for future in tqdm(futures, desc='Processing batch', leave=False):
|
154 |
+
filtered_chunk = future.result()
|
155 |
+
if not filtered_chunk.empty:
|
156 |
+
if first_chunk:
|
157 |
+
filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
|
158 |
+
first_chunk = False
|
159 |
+
else:
|
160 |
+
filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
|
161 |
+
chunk_list = []
|
162 |
+
# Process any remaining chunks
|
163 |
+
if chunk_list:
|
164 |
+
futures = [executor.submit(process_chunk, c) for c in chunk_list]
|
165 |
+
for future in tqdm(futures, desc='Processing last batch', leave=False):
|
166 |
+
filtered_chunk = future.result()
|
167 |
+
if not filtered_chunk.empty:
|
168 |
+
if first_chunk:
|
169 |
+
filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
|
170 |
+
first_chunk = False
|
171 |
+
else:
|
172 |
+
filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
|
173 |
+
print(f'Finished processing {input_file}')
|
174 |
+
|
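# Because the futures list is consumed in submission order, filtered rows are
# written in the same order their chunks were read, so the output file
# preserves the input row order even though chunks are scored in parallel.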
# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]  # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]

# Process each CSV file in each directory
for dir_path in directories:
    if not os.path.isdir(dir_path):
        print(f'Directory not found: {dir_path}')
        continue
    csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
    print(f'Found {len(csv_files)} CSV files in {dir_path}')
    for input_file in csv_files:
        # Construct output file name
        base_name = os.path.basename(input_file)
        output_file = os.path.join(dir_path, 'math_' + base_name)

        # Check if output file already exists to avoid reprocessing
        if os.path.exists(output_file):
            print(f'Output file already exists. Skipping: {output_file}')
            continue

        process_file(input_file, output_file)
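# Portability note (assumption: this runs on Linux, where the default
# multiprocessing start method is fork): on Windows and macOS, workers are
# spawned by re-importing this module, which would re-run the driver loop
# above in every worker. On those platforms, wrap the driver in a main guard:
#
#   if __name__ == '__main__':
#       for dir_path in directories:
#           ...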