leonleyang committed
Commit 123635d (verified)
Parent(s): 3351e1c

Create country211.py

Files changed (1):
  1. country211.py +105 -0
country211.py ADDED
@@ -0,0 +1,105 @@
import tarfile
from tqdm import tqdm
import datasets
from PIL import Image
import io


class Country211(datasets.GeneratorBasedBuilder):
    """Country211: Image Classification Dataset for Geolocation.

    This dataset uses a subset of the YFCC100M dataset, filtered by GPS coordinates to include images labeled
    with ISO-3166 country codes. Each country has a balanced sample of images for training, validation, and testing.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description="Country211 dataset for image classification by country.",
            features=datasets.Features({
                "image": datasets.Image(),
                "label": datasets.ClassLabel(names=self._class_names())
            }),
            supervised_keys=("image", "label"),
            homepage="https://github.com/openai/CLIP/blob/main/data/country211.md",
            citation="""@inproceedings{radford2021learning,
              title={Learning transferable visual models from natural language supervision},
              author={Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and others},
              booktitle={International conference on machine learning},
              pages={8748--8763},
              year={2021},
              organization={PMLR}}"""
        )

    def _split_generators(self, dl_manager):
        # Download the archive once; all three splits are read from it
        url = "https://openaipublic.azureedge.net/clip/data/country211.tgz"
        archive_path = dl_manager.download(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "valid"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"}
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Generate examples from the tar archive."""
        with tarfile.open(archive_path, "r:gz") as archive:
            # Files for this split live under country211/<split>/<country_code>/<image>.jpg
            split_dir = f"country211/{split}"

            # Map country codes to integer class indices
            class_names = self._class_names()
            class_to_idx = {name: idx for idx, name in enumerate(class_names)}

            # Counter used as the unique example key
            idx = 0

            for member in tqdm(archive.getmembers(), desc=f"Processing {split} split"):
                # Only process files within the requested split directory
                if member.isfile() and member.name.startswith(split_dir):
                    # The country code is the name of the parent directory
                    path_parts = member.name.split("/")
                    country_code = path_parts[2]

                    # Check that the country code is one of the known classes
                    if country_code in class_to_idx:
                        label = class_to_idx[country_code]

                        # Read the image bytes from the archive and decode them with PIL
                        with archive.extractfile(member) as file:
                            image = Image.open(io.BytesIO(file.read())).convert("RGB")

                        # Use the counter as the unique example key
                        yield idx, {
                            "image": image,
                            "label": label,
                        }
                        idx += 1  # Increment the counter for the next image
                    else:
                        raise ValueError(f"Invalid country code: {country_code}")

    @staticmethod
    def _class_names():
        return ['AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB',
                'BD', 'BE', 'BF', 'BG', 'BH', 'BJ', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT', 'BW', 'BY', 'BZ', 'CA',
                'CD', 'CF', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CY', 'CZ', 'DE', 'DK',
                'DM', 'DO', 'DZ', 'EC', 'EE', 'EG', 'ES', 'ET', 'FI', 'FJ', 'FK', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE',
                'GF', 'GG', 'GH', 'GI', 'GL', 'GM', 'GP', 'GR', 'GS', 'GT', 'GU', 'GY', 'HK', 'HN', 'HR', 'HT', 'HU',
                'ID', 'IE', 'IL', 'IM', 'IN', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KN',
                'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LT', 'LU', 'LV', 'LY', 'MA', 'MC',
                'MD', 'ME', 'MF', 'MG', 'MK', 'ML', 'MM', 'MN', 'MO', 'MQ', 'MR', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY',
                'MZ', 'NA', 'NC', 'NG', 'NI', 'NL', 'NO', 'NP', 'NZ', 'OM', 'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL',
                'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE', 'RO', 'RS', 'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG',
                'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SS', 'SV', 'SX', 'SY', 'SZ', 'TG', 'TH', 'TJ', 'TL',
                'TM', 'TN', 'TO', 'TR', 'TT', 'TW', 'TZ', 'UA', 'UG', 'US', 'UY', 'UZ', 'VA', 'VE', 'VG', 'VI', 'VN',
                'VU', 'WS', 'XK', 'YE', 'ZA', 'ZM', 'ZW']
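
For reference, a minimal sketch of how the builder added in this commit could be exercised. The script path here is an assumption (point it at wherever country211.py is saved or hosted), and trust_remote_code is only needed on datasets releases that require an explicit opt-in for dataset scripts.

from datasets import load_dataset

# Hypothetical local path to the script from this commit
ds = load_dataset("./country211.py", trust_remote_code=True)

print(ds)  # DatasetDict with "train", "validation", and "test" splits

example = ds["train"][0]
label_feature = ds["train"].features["label"]
print(example["image"].size)                     # PIL image decoded by the builder
print(label_feature.int2str(example["label"]))   # ISO-3166 country code, e.g. "AD"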