Josephgflowers committed
Commit 073b267
1 Parent(s): 8016326

Upload find-reason-fine.py

Files changed (1)
1. find-reason-fine.py +191 -0
find-reason-fine.py ADDED
@@ -0,0 +1,191 @@
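# find-reason-fine.py
# Scans headerless CSV shards with columns (score, text, url), keeps rows whose
# combined count of reasoning-keyword matches meets a threshold, and writes the
# survivors to reason_<input>.csv with the match count in the 'score' column.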
import pandas as pd
import re
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import os
import glob

# Reasoning keywords (phrases matched with regex word boundaries)
science_keywords_list = [

    # Core Types of Reasoning
    "deductive reasoning", "inductive reasoning", "abductive reasoning",
    "deductive logic", "inductive logic", "probabilistic reasoning",
    "hypothetical reasoning", "falsifiability", "meta-cognition",

    # Logic Structures and Components
    "syllogism", "proposition", "premise", "conclusion", "logical fallacy",
    "argument", "logical consistency", "logical operator", "step by step",

    # Analytical and Critical Thinking
    "critical thinking", "analytical skills", "creative thinking",
    "convergent thinking", "divergent thinking", "contextual analysis",
    "pattern recognition", "structured reflection", "reasoned judgment",
    "cognitive load", "counterfactual thinking", "comparative reasoning",
    "subjective reasoning", "objective reasoning", "systematic approach",

    # Hypothesis and Evidence Analysis
    "hypothesis testing", "hypothesis generation", "evidence-based reasoning",
    "empirical reasoning", "evidence synthesis", "confirmation bias",
    "cognitive bias", "causation vs correlation", "construct validity",

    # Problem Solving and Decision Making
    "problem analysis", "brainstorming", "decision making", "decision fatigue",
    "satisficing", "bounded rationality", "opportunity cost",
    "cost-benefit analysis", "optimization", "strategic planning",
    "trade-off analysis", "prioritization matrix", "value prioritization",

    # Heuristics and Algorithms
    "heuristic", "heuristic reasoning", "algorithm", "recursive thinking",
    "pattern matching", "dynamic programming", "systematic approach",

    # Data Analysis and Modeling
    "data analysis", "causal reasoning", "correlation", "probabilistic inference",
    "qualitative analysis", "quantitative analysis", "predictive modeling",
    "belief revision", "mental modeling", "proportional reasoning",

    # Cognitive Processes and Biases
    "cognitive dissonance", "framing effect", "bias mitigation",
    "normative reasoning", "intuitive thinking", "belief bias",

    # Argumentation and Discourse
    "counterargument", "debate", "dialectic", "socratic questioning",
    "disjunctive reasoning", "conjunctive reasoning", "chain of thought",

    # Problem Decomposition and Structuring
    "root cause analysis", "5 whys", "decision tree", "flow chart",
    "process mapping", "mind mapping", "ideation", "brainwriting",
    "problem decomposition", "value chain analysis",

    # Analytical Frameworks and Techniques
    "SWOT analysis", "gap analysis", "risk assessment", "scenario planning",
    "simulation", "backcasting", "game theory", "decision matrix",
    "opportunity analysis", "knowledge representation",

    # Creative Thinking and Synthesis
    "lateral thinking", "synergistic thinking", "brainstorming",
    "synthesis", "ideation", "hypothetical deduction",

    # Additional Analytical Techniques
    "comparative analysis", "analytical hierarchy process", "multicriteria decision analysis",
    "trade-off analysis", "constraint analysis", "thought experiment",
]

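# Note: a few phrases ("systematic approach", "brainstorming", "ideation",
# "trade-off analysis") appear under more than one category above; duplicates
# are harmless for matching, since the alternation matches each span once.
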
# Escape special regex characters and add word boundaries.
# (The .replace undoes the space-escaping that re.escape applied before
# Python 3.7; on newer versions it is a no-op.)
science_keywords = [
    r"\b" + re.escape(keyword).replace(r'\ ', ' ') + r"\b" for keyword in science_keywords_list
]

# Combine the keywords into a single regex pattern using a non-capturing group
science_regex = r'(?:' + r'|'.join(science_keywords) + r')'
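
# Illustration (hypothetical snippet, not part of the pipeline): the combined
# pattern matches whole phrases case-insensitively, e.g.
#   re.findall(science_regex, "Deductive reasoning is a thought experiment.", flags=re.IGNORECASE)
#   -> ['Deductive reasoning', 'thought experiment']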

# Function to process a chunk of the dataset
def process_chunk(chunk):
    # Assign column names if they are not already set
    if list(chunk.columns) != ['score', 'text', 'url']:
        chunk.columns = ['score', 'text', 'url']

    # Use vectorized string operations for efficiency:
    # count the number of keyword matches in each column
    score_counts = chunk['score'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    url_counts = chunk['url'].astype(str).str.count(science_regex, flags=re.IGNORECASE)
    text_counts = chunk['text'].astype(str).str.count(science_regex, flags=re.IGNORECASE)

    # Handle NaN values by filling them with zero
    score_counts = score_counts.fillna(0)
    url_counts = url_counts.fillna(0)
    text_counts = text_counts.fillna(0)

    # Sum the counts to get the science score
    match_counts = score_counts + url_counts + text_counts
    match_counts = match_counts.astype(int)

    # Set a threshold for the minimum science score
    threshold = 15  # Adjust this value as needed

    # Filter rows that meet the threshold
    filtered_chunk = chunk[match_counts >= threshold].copy()
    filtered_chunk['science_score'] = match_counts[match_counts >= threshold]

    # Replace the original 'score' with 'science_score'
    filtered_chunk['score'] = filtered_chunk['science_score']
    filtered_chunk = filtered_chunk.drop(columns=['science_score'])

    return filtered_chunk
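
# Illustration (hypothetical data, not part of the pipeline):
#   df = pd.DataFrame([[0, "critical thinking, step by step", "http://example.com"]])
#   process_chunk(df)  # empty result: only 2 keyword matches, threshold is 15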

# Function to process a single CSV file
def process_file(input_file, output_file):
    # Read the CSV file in chunks, assuming no header row in the file
    chunk_size = 10000  # Adjust this value based on your memory constraints
    reader = pd.read_csv(input_file, chunksize=chunk_size, header=None)

    # Prepare the output file
    first_chunk = True

    # Number of worker processes
    num_workers = 8  # Adjust based on your CPU cores

    # Batch size for chunks to process in parallel
    batch_size = num_workers * 4  # Adjust based on memory constraints

    chunk_list = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for chunk in tqdm(reader, desc=f'Reading chunks from {os.path.basename(input_file)}'):
            chunk_list.append(chunk)
            if len(chunk_list) == batch_size:
                # Process a batch of chunks in parallel
                futures = [executor.submit(process_chunk, c) for c in chunk_list]
                for future in tqdm(futures, desc='Processing batch', leave=False):
                    filtered_chunk = future.result()
                    if not filtered_chunk.empty:
                        if first_chunk:
                            filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
                            first_chunk = False
                        else:
                            filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
                chunk_list = []
        # Process any remaining chunks
        if chunk_list:
            futures = [executor.submit(process_chunk, c) for c in chunk_list]
            for future in tqdm(futures, desc='Processing last batch', leave=False):
                filtered_chunk = future.result()
                if not filtered_chunk.empty:
                    if first_chunk:
                        filtered_chunk.to_csv(output_file, mode='w', index=False, header=False)
                        first_chunk = False
                    else:
                        filtered_chunk.to_csv(output_file, mode='a', index=False, header=False)
    print(f'Finished processing {input_file}')

# List of directories to process
data_dir = '/media/joe/512-3/csv'
years = [f'CC-MAIN-{year}' for year in range(2013, 2025)]  # Adjust years as needed
directories = [os.path.join(data_dir, year) for year in years]

# Process each CSV file in each directory
for dir_path in directories:
    if not os.path.isdir(dir_path):
        print(f'Directory not found: {dir_path}')
        continue
    csv_files = glob.glob(os.path.join(dir_path, '*.csv'))
    # Exclude outputs from earlier runs, which also match *.csv in this directory
    csv_files = [f for f in csv_files if not os.path.basename(f).startswith('reason_')]
    print(f'Found {len(csv_files)} CSV files in {dir_path}')
    for input_file in csv_files:
        # Construct the output file name
        base_name = os.path.basename(input_file)
        output_file = os.path.join(
            dir_path, 'reason_' + base_name
        )

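        # Note: process_file appends output batch by batch, so an interrupted
        # run can leave a partial reason_*.csv behind that the existence check
        # below will then skip; delete partial outputs before rerunning.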
        # Check if the output file already exists, to avoid reprocessing
        if os.path.exists(output_file):
            print(f'Output file already exists. Skipping: {output_file}')
            continue

        process_file(input_file, output_file)
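
Run directly with python find-reason-fine.py: the input root (/media/joe/512-3/csv), crawl years, worker count, and the threshold of 15 are all hardcoded above, and filtered rows are written to reason_<input>.csv beside each source file.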