Datasets:
Add parquet file test
Browse files- .gitattributes +2 -0
- convert.py +0 -66
- train.parquet → train-0000.parquet +0 -0
- train-0001.parquet +3 -0
.gitattributes
CHANGED
@@ -3608,3 +3608,5 @@ processed_images/r31-0919.png filter=lfs diff=lfs merge=lfs -text
|
|
3608 |
processed_images/r31-1876.png filter=lfs diff=lfs merge=lfs -text
|
3609 |
processed_images/r32-0784.png filter=lfs diff=lfs merge=lfs -text
|
3610 |
handwriting/train-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
3608 |
processed_images/r31-1876.png filter=lfs diff=lfs merge=lfs -text
|
3609 |
processed_images/r32-0784.png filter=lfs diff=lfs merge=lfs -text
|
3610 |
handwriting/train-00000-of-00001.parquet filter=lfs diff=lfs merge=lfs -text
|
3611 |
+
train-0000.parquet filter=lfs diff=lfs merge=lfs -text
|
3612 |
+
train-0001.parquet filter=lfs diff=lfs merge=lfs -text
|
convert.py
DELETED
@@ -1,66 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import pandas as pd
|
3 |
-
from PIL import Image
|
4 |
-
import codecs
|
5 |
-
import numpy as np
|
6 |
-
import glob
|
7 |
-
import io
|
8 |
-
|
9 |
-
def create_dataset():
    """Build ``train.parquet`` from the round-3 label files and their images.

    Reads every ``./original/test/round3?_best2019.label`` file (cp874,
    i.e. Thai Windows encoding), pairs each listed image filename with its
    caption, re-encodes each existing image as PNG, and stores the raw PNG
    bytes plus caption and source label file in a parquet file.

    Side effects:
        - Creates the ``processed_images`` directory if missing.
        - Writes ``train.parquet`` in the current working directory.
    """
    print("Starting dataset creation...")

    # Create output directory for images
    os.makedirs("processed_images", exist_ok=True)

    # Find all label files matching pattern
    print("Finding label files...")
    label_files = glob.glob("./original/test/round3?_best2019.label")
    print(f"Found {len(label_files)} label files")

    # Parse image filenames and captions
    data = []
    for i, label_path in enumerate(label_files):
        print(f"\nProcessing label file {i+1}/{len(label_files)}: {label_path}")

        # Read label file with Windows-874 (Thai) encoding
        print("Reading label file...")
        with codecs.open(label_path, 'r', encoding='cp874') as f:
            lines = f.readlines()
        print(f"Found {len(lines)} entries")

        for j, line in enumerate(lines):
            if j % 100 == 0:
                print(f"Processing entry {j}/{len(lines)}")

            # Each line is "<filename> <caption>". Skip blank or
            # caption-less lines instead of crashing on the tuple unpack.
            parts = line.strip().split(' ', 1)
            if len(parts) != 2:
                continue
            filename, caption = parts

            # Get folder name from first 3 chars of filename
            folder = filename[:3]
            # NOTE(review): the last path component was garbled in the
            # recovered source; restored to the image filename, which the
            # folder derivation above implies — confirm against the data.
            image_path = f"./original/test/{folder}/{filename}"
            # Load and verify image exists
            if os.path.exists(image_path):
                try:
                    # Load image and re-encode as PNG; the context manager
                    # ensures the underlying file handle is closed.
                    with Image.open(image_path) as img:
                        img_byte_arr = io.BytesIO()
                        img.save(img_byte_arr, format='PNG')  # Force PNG format
                    img_bytes = {"bytes": bytearray(img_byte_arr.getvalue())}

                    data.append({
                        'image': img_bytes,  # dict wrapping raw PNG bytes
                        'text': caption,
                        'label_file': os.path.basename(label_path)
                    })
                except Exception as e:
                    # Best-effort: report the bad image and keep going.
                    print(f"Error processing image {image_path}: {e}")
    print(f"\nProcessed {len(data)} total images successfully")

    # Convert to dataframe and save as parquet
    print("Converting to dataframe...")
    df = pd.DataFrame(data)
    print("Saving to parquet file...")
    df.to_parquet("train.parquet", index=False)
    print("Dataset creation complete!")
65 |
-
# Script entry point: run the conversion only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    create_dataset()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
train.parquet → train-0000.parquet
RENAMED
File without changes
|
train-0001.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6f1794e3c1bd885cf620d9fcd97dd98dbe3d841b08ef9ce482d26affc0fd7c19
|
3 |
+
size 88731703
|