tdurbor committed on
Commit
c134030
1 Parent(s): fe62bf5

Add upload to dataset

Browse files
Files changed (1) hide show
  1. utils/upload_to_dataset.py +89 -0
utils/upload_to_dataset.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datasets import Dataset, Features, Value, Image
2
+ from huggingface_hub import HfApi
3
+ import os
4
+ from collections import defaultdict
5
+ import pandas as pd
6
+ import argparse
7
+
8
def upload_to_dataset(original_images_dir, processed_images_dir, dataset_name, dry_run=False):
    """Build an image-comparison dataset and (optionally) push it to the HF Hub.

    Walks ``original_images_dir`` for images, pairs each one with the
    same-named output of every segmentation model found under
    ``processed_images_dir/<model>/``, dumps the collected paths to
    ``image_data.csv`` for inspection, and pushes the resulting private
    dataset to the Hub unless ``dry_run`` is set.

    Args:
        original_images_dir: Directory (searched recursively) of source images.
        processed_images_dir: Directory with one subfolder per model
            (clipdrop, bria, photoroom, removebg) of processed images.
        dataset_name: Hub repo id to push to.
        dry_run: If True, prepare and preview the dataset without uploading.
    """
    # One dedicated Image column per model, plus the original and its filename.
    features = Features({
        "original_image": Image(),      # Original image feature
        "clipdrop_image": Image(),      # Clipdrop segmented image
        "bria_image": Image(),          # Bria segmented image
        "photoroom_image": Image(),     # Photoroom segmented image
        "removebg_image": Image(),      # RemoveBG segmented image
        "original_filename": Value("string")  # Original filename
    })

    sources = ["clipdrop", "bria", "photoroom", "removebg"]
    image_extensions = ('.png', '.jpg', '.jpeg')

    # filename -> column values; a model's entry stays None when no match exists.
    data = defaultdict(lambda: {f"{source}_image": None for source in sources})

    # Walk the original images folder (recursively).
    for root, _, files in os.walk(original_images_dir):
        for f in files:
            # Case-insensitive so .PNG / .JPG files are not silently skipped.
            if not f.lower().endswith(image_extensions):
                continue
            data[f]["original_image"] = os.path.join(root, f)
            data[f]["original_filename"] = f

            stem = os.path.splitext(f)[0]
            # Look for the corresponding output of each model. All supported
            # extensions are tried (the first match wins); note this includes
            # '.jpeg', which the processed-image lookup previously missed.
            for source in sources:
                for ext in image_extensions:
                    source_image_path = os.path.join(processed_images_dir, source, stem + ext)
                    if os.path.exists(source_image_path):
                        data[f][f"{source}_image"] = source_image_path
                        break  # Stop checking other extensions once found.

    # Convert the per-file records to a dictionary of parallel column lists.
    dataset_dict = {
        "original_image": [],
        "clipdrop_image": [],
        "bria_image": [],
        "photoroom_image": [],
        "removebg_image": [],
        "original_filename": []
    }
    for filename, entry in data.items():
        if "original_image" in entry:
            dataset_dict["original_image"].append(entry["original_image"])
            for source in sources:
                dataset_dict[f"{source}_image"].append(entry[f"{source}_image"])
            dataset_dict["original_filename"].append(filename)

    # Save the collected paths to a CSV file for manual inspection.
    df = pd.DataFrame.from_dict(dataset_dict)
    df.to_csv("image_data.csv", index=False)

    # Create the Dataset (image paths are decoded lazily by the Image feature).
    dataset = Dataset.from_dict(dataset_dict, features=features)

    if dry_run:
        print("Dry run: Dataset prepared but not pushed to Hugging Face Hub.")
        print(df.head())  # Display the first few rows for inspection
    else:
        # Push the dataset to Hugging Face Hub as a private repo, using the
        # locally stored token.
        api = HfApi()
        dataset.push_to_hub(dataset_name, token=api.token, private=True)
79
+
80
if __name__ == "__main__":
    # Command-line entry point: the positional arguments mirror the
    # parameters of upload_to_dataset, in the same order.
    cli = argparse.ArgumentParser(description="Upload images to a Hugging Face dataset.")
    cli.add_argument("original_images_dir", type=str, help="Directory containing the original images.")
    cli.add_argument("processed_images_dir", type=str, help="Directory containing the processed images with subfolders for each model.")
    cli.add_argument("dataset_name", type=str, help="Name of the dataset to upload to Hugging Face Hub.")
    cli.add_argument("--dry-run", action="store_true", help="Perform a dry run without uploading to the hub.")

    options = cli.parse_args()

    upload_to_dataset(
        options.original_images_dir,
        options.processed_images_dir,
        options.dataset_name,
        dry_run=options.dry_run,
    )