Commit c7ad8d9
Parent(s): 2a15205

yolo

Files changed:
- output_dataset.parquet +2 -2
- process.py +43 -34
- viz.py +4 -3
output_dataset.parquet CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:390edf7f278efb73b31a50a17456c7ea456d1980d82c9782ca6f1bf5d5926c8f
+size 459119
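The parquet file is tracked with Git LFS, so the diff above only touches the pointer file: oid is the SHA-256 of the new payload and size its byte count (the previous values were lost in this view). A minimal sketch for checking that a locally downloaded output_dataset.parquet matches the new pointer, assuming the file sits in the current directory:

import hashlib
import os

path = "output_dataset.parquet"
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# Both checks should hold for the revision introduced by this commit.
print(digest == "390edf7f278efb73b31a50a17456c7ea456d1980d82c9782ca6f1bf5d5926c8f")
print(os.path.getsize(path) == 459119)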
process.py CHANGED

@@ -4,20 +4,11 @@ import pyarrow as pa
 import pyarrow.parquet as pq
 import argparse
 import re
-import base64
 
-def
-    """
-
-
-            return base64.b64encode(image_file.read()).decode('utf-8')
-    else:
-        try:
-            with open(file_path, 'r', encoding='utf-8') as file:
-                return file.read()
-        except UnicodeDecodeError as e:
-            print(f"Error decoding file {file_path}: {e}")
-            return None
+def load_binary(file_path):
+    """Load binary data from a file."""
+    with open(file_path, "rb") as image_file:
+        return image_file.read()
 
 def extract_images(markdown_content):
     """Extract PHOTO_IDs from markdown files and return as a list."""
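This hunk is the heart of the commit: the removed helper returned images as base64 text, while the new load_binary returns raw bytes, which Parquet can store natively and without the roughly 33% base64 size overhead. A quick illustration of that overhead, using stand-in bytes rather than real dataset content:

import base64

payload = bytes(range(256))        # stand-in for a JPEG's raw bytes
encoded = base64.b64encode(payload)

print(len(payload))   # 256 -> what load_binary() now returns and parquet stores
print(len(encoded))   # 344 -> the old text representation, about 1.33x larger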
@@ -25,37 +16,55 @@ def extract_images(markdown_content):
 
 def collect_data(directory):
     data = {}
-
-
+    # Collect all images first and map them by ID extracted from the filename
+    image_files = {}
+    for filename in os.listdir(directory):
+        if filename.endswith('.jpg'):
+            photo_id = re.search(r'(\d+)', filename)
+            if photo_id:
+                image_files[photo_id.group(1)] = os.path.join(directory, filename)
 
-    # Identify
-
-
-
-
-
-        data[problem_id] = {'Problem ID': problem_id, 'Images': [], 'in': None, 'out': None, 'cpp': None, 'md': None, 'sol.md': None}
+    # Identify all problem IDs based on markdown files.
+    for filename in os.listdir(directory):
+        if filename.endswith('.md') or filename.endswith('.sol.md'):
+            problem_id = re.sub(r'sol\.md$', '', re.sub(r'\.md$', '', filename))
+            if problem_id not in data:
+                data[problem_id] = {'Problem ID': problem_id, 'Images': [], 'in': None, 'out': None, 'cpp': None, 'md': None, 'sol.md': None}
 
-    #
+    # Associate files with these problem IDs
     for filename in os.listdir(directory):
-
-
-
-
-
+        base_name = re.sub(r'sol\.md$', '', re.sub(r'\.md$', '', filename))
+        extension = 'sol.md' if 'sol.md' in filename else filename.split('.')[-1]
+        file_path = os.path.join(directory, filename)
+
+        if base_name in data:
+            if extension == 'jpg':
+                # Load binary data instead of encoding
+                content = load_binary(file_path)
+                data[base_name]['Images'].append(content)
+            else:
+                try:
+                    with open(file_path, 'r', encoding='utf-8') as file:
+                        content = file.read()
+                except UnicodeDecodeError as e:
+                    print(f"Error decoding file {file_path}: {e}")
+                    continue
 
-
-
-
+            if extension in ['md', 'sol.md']:
+                data[base_name][extension] = content
+                # Extract and include image references as binary data
                 image_ids = extract_images(content)
-        data[
-
+                data[base_name]['Images'] += [load_binary(image_files[id]) for id in image_ids if id in image_files]
+            elif extension in ['in', 'out', 'cpp']:
+                data[base_name][extension] = content
 
     return list(data.values())
 
 def create_parquet_file(data, output_file):
     df = pd.DataFrame(data)
-
+    # Convert list of binary data to bytes for proper storage
+    df['Images'] = df['Images'].apply(lambda x: [memoryview(b) for b in x])
+    table = pa.Table.from_pandas(df, preserve_index=False)
     pq.write_table(table, output_file)
 
 def main():
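create_parquet_file now stores each row's Images as a list of binary values; memoryview wraps the collected bytes without copying, and pyarrow accepts buffer-like objects as binary data. A sketch of consuming the result, assuming Pillow is available for decoding (the commit itself does not guarantee any particular row has images):

import io
import pandas as pd
from PIL import Image

df = pd.read_parquet("output_dataset.parquet")
row = df.iloc[0]
if len(row["Images"]) > 0:
    # Each entry holds the raw JPEG content gathered by load_binary().
    img = Image.open(io.BytesIO(bytes(row["Images"][0])))
    print(row["Problem ID"], img.size)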
viz.py CHANGED

@@ -1,6 +1,6 @@
 import pandas as pd
-
-
+pd.set_option('display.max_columns', None)  # None means no limit
+pd.set_option('display.width', None)  # None means use the current terminal width
 
 # Load the Parquet file
 df = pd.read_parquet('output_dataset.parquet')
@@ -8,7 +8,8 @@ df = pd.read_parquet('output_dataset.parquet')
 print(df.columns)
 
 # Display the first few rows of the dataframe
-print(df)
+print(df.loc[4, "Images"][0])
+
 
 # Basic statistics for numerical columns
 # print(df.describe())
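Printing df.loc[4, "Images"][0] dumps raw JPEG bytes to the terminal, which works as a smoke test but is not viewable. A small variant that writes those bytes to disk instead, assuming row 4 exists and its Images list is non-empty:

import pandas as pd

df = pd.read_parquet('output_dataset.parquet')

# Save the first image of row 4 so any image viewer can open it.
with open('sample.jpg', 'wb') as f:
    f.write(bytes(df.loc[4, "Images"][0]))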