fullstack committed on
Commit 21300bd · verified · 1 Parent(s): 2cdf2dd

Upload folder using huggingface_hub

Files changed (4)
  1. .gitattributes +1 -0
  2. images.zip +3 -0
  3. train.jsonl +3 -0
  4. visual_dataset.py +115 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+train.jsonl filter=lfs diff=lfs merge=lfs -text
images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3fd1a4cae5a787c50460cd085dac5995d92ac87e5dcbc54e0b61b3276fb3b6c1
+size 1407451163
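
images.zip is a Git LFS pointer to roughly 1.4 GB of archived images. A minimal unpacking sketch, assuming the archive has already been fetched (for example with git lfs pull) and simply contains the raw .jpg/.png files that visual_dataset.py below reads from ./images by default; that layout is an assumption, not something the commit states:

# Sketch: unpack images.zip into ./images, the default --input of visual_dataset.py.
# Assumption: the archive holds plain image files (not stated in this commit).
import zipfile
from pathlib import Path

target = Path("images")
target.mkdir(exist_ok=True)
with zipfile.ZipFile("images.zip") as zf:
    zf.extractall(target)
print(f"Extracted {sum(1 for _ in target.rglob('*'))} entries into {target}/")
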
train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c92b25a69c9f6d616b6f5d53b335e3560a3cc3f9f2339d279a93a0b4a5401b0c
+size 645695224
visual_dataset.py ADDED
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+
+import os
+import json
+import base64
+import argparse
+from datetime import datetime
+from pathlib import Path
+from PIL import Image
+from io import BytesIO
+from tqdm import tqdm
+
+def get_output_filename():
+    """Generate unique output filename with timestamp"""
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    return f"output_images_{timestamp}.jsonl"
+
+def encode_image_base64(pil_image):
+    """Convert PIL Image to base64 string"""
+    buffered = BytesIO()
+    # Convert to RGB so the image can be saved as JPEG (handles RGBA, palette, etc.)
+    if pil_image.mode != 'RGB':
+        pil_image = pil_image.convert('RGB')
+    pil_image.save(buffered, format="JPEG", quality=95)
+    return base64.b64encode(buffered.getvalue()).decode('utf-8')
+
+def process_image(image_path, output_file):
+    """Process single image and write to JSONL with conditional resizing"""
+    MAX_WIDTH = 800
+    MAX_HEIGHT = 1000
+
+    try:
+        image = Image.open(image_path)
+        width, height = image.size
+
+        # Only resize if image exceeds max dimensions
+        if width > MAX_WIDTH or height > MAX_HEIGHT:
+            # Calculate aspect-ratio-preserving dimensions
+            ratio = min(MAX_WIDTH / width, MAX_HEIGHT / height)
+            new_size = (int(width * ratio), int(height * ratio))
+            processed_image = image.resize(new_size, Image.Resampling.LANCZOS)
+        else:
+            processed_image = image
+            new_size = (width, height)
+
+        entry = {
+            "source": str(image_path),
+            "timestamp": datetime.now().isoformat(),
+            "width": new_size[0],
+            "height": new_size[1],
+            "original_width": width,
+            "original_height": height,
+            "was_resized": width > MAX_WIDTH or height > MAX_HEIGHT,
+            "image": encode_image_base64(processed_image)
+        }
+
+        with open(output_file, 'a') as f:
+            f.write(json.dumps(entry) + '\n')
+
+        return True
+
+    except Exception as e:
+        print(f"Error processing {image_path}: {str(e)}")
+        return False
+
+def find_image_files(directory):
+    """Recursively find all image files in directory"""
+    image_extensions = {'.jpg', '.jpeg', '.png'}
+    image_files = []
+
+    for path in Path(directory).rglob('*'):
+        if path.suffix.lower() in image_extensions:
+            image_files.append(path)
+
+    return image_files
+
+def main():
+    parser = argparse.ArgumentParser(description='Convert images to JSONL format')
+    parser.add_argument('--input', default='./images',
+                        help='Input directory containing images (default: ./images)')
+    parser.add_argument('--output', default=None,
+                        help='Output JSONL file (default: output_images_<timestamp>.jsonl)')
+
+    args = parser.parse_args()
+
+    # Setup paths
+    input_dir = Path(args.input)
+    output_file = args.output or get_output_filename()
+
+    if not input_dir.exists():
+        raise FileNotFoundError(f"Input directory {input_dir} not found")
+
+    # Get list of image files
+    image_files = find_image_files(input_dir)
+
+    if not image_files:
+        print(f"No image files found in {input_dir}")
+        return
+
+    print(f"Found {len(image_files)} image files")
+    print(f"Output will be written to: {output_file}")
+
+    # Process each image
+    successful = 0
+    for image_file in tqdm(image_files, desc="Processing images"):
+        if process_image(image_file, output_file):
+            successful += 1
+        # Print progress after each file
+        tqdm.write(f"Processed {image_file}")
+
+    print(f"\nComplete! Successfully processed {successful} out of {len(image_files)} files")
+    print(f"Output written to: {output_file}")
+
+if __name__ == "__main__":
+    main()
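
A minimal read-back sketch, not part of the commit: it assumes train.jsonl holds one JSON object per line with the keys written by process_image above ("source", "width", "height", "was_resized", and a base64-encoded JPEG under "image").

# Sketch: load the first record of train.jsonl and rebuild its image.
# Assumption: the records follow the schema produced by process_image() above.
import base64
import json
from io import BytesIO
from PIL import Image

with open("train.jsonl") as f:
    record = json.loads(f.readline())

img = Image.open(BytesIO(base64.b64decode(record["image"])))
print(record["source"], record["width"], record["height"], record["was_resized"])
img.save("preview.jpg")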