# unsplash-lite / script-generation.py
# (Hugging Face file-viewer header captured along with the source:
#  author "1aurent", commit "Update script-generation.py" 20c2bc8,
#  file size 11.7 kB, "raw / history blame" links.)
from pathlib import Path
import datasets
import pandas as pd
# Upstream Unsplash dataset release targeted by this loading script.
_VERSION = "1.2.1"
# BibTeX entry for citing the dataset; the release version is interpolated.
_CITATION = f"""
@dataset{{unsplash-lite-dataset,
title = {{Unsplash Lite Dataset {_VERSION}}},
url = {{\\url{{https://github.com/unsplash/datasets}}}},
author = {{Unsplash}},
year = {{2023}},
month = {{May}},
day = {{02}},
}}
"""
# Human-readable summary shown on the dataset page.
_DESCRIPTION = """
This dataset, available for commercial and noncommercial usage,
contains 25k nature-themed Unsplash photos, 25k keywords, and 1M searches.
"""
# Project homepage pinned to the matching release tag.
_HOMEPAGE = f"https://github.com/unsplash/datasets/tree/{_VERSION}"
# Download URL of the "lite" archive for this release.
_URL = f"https://unsplash.com/data/lite/{_VERSION}"
_LICENSE = "Unsplash Dataset License"
# Document types shipped as TSV files inside the archive. NOTE: "photos"
# must stay last — the builder below relies on _TSV[:-1] being exactly the
# one-to-many (per-photo list) documents.
_TSV = (
    "collections",
    "colors",
    "conversions",
    "keywords",
    "photos",
)
# Feature schema of one example. The top-level keys mirror the
# sub-dataframes built in `Unsplash._split_generators`: plain nested dicts
# hold the per-photo records split out of the wide "photos" TSV, while
# one-element lists of dicts declare the one-to-many TSV documents
# (keywords, collections, conversions, colors).
_FEATURES = datasets.Features(
    {
        # Core photo record (from "photo_*" columns plus "blur_hash").
        "photo": {
            "id": datasets.Value("string"),
            "url": datasets.Value("string"),
            "image_url": datasets.Value("string"),
            "submitted_at": datasets.Value("string"),
            "featured": datasets.Value("bool"),
            "width": datasets.Value("uint16"),
            "height": datasets.Value("uint16"),
            "aspect_ratio": datasets.Value("float32"),
            "description": datasets.Value("string"),
            "blur_hash": datasets.Value("string"),
        },
        # From "photographer_*" columns.
        "photographer": {
            "username": datasets.Value("string"),
            "first_name": datasets.Value("string"),
            "last_name": datasets.Value("string"),
        },
        # From "exif_*" columns; kept as strings as shipped in the TSV.
        "exif": {
            "camera_make": datasets.Value("string"),
            "camera_model": datasets.Value("string"),
            "iso": datasets.Value("string"),
            "aperture_value": datasets.Value("string"),
            "focal_length": datasets.Value("string"),
            "exposure_time": datasets.Value("string"),
        },
        # From "photo_location_*" columns.
        "location": {
            "name": datasets.Value("string"),
            "latitude": datasets.Value("float32"),
            "longitude": datasets.Value("float32"),
            "country": datasets.Value("string"),
            "city": datasets.Value("string"),
        },
        # From "stats_*" columns.
        "stats": {
            "views": datasets.Value("uint32"),
            "downloads": datasets.Value("uint32"),
        },
        # From "ai_*" columns; landmark fields are strings as shipped.
        "ai": {
            "description": datasets.Value("string"),
            "primary_landmark_name": datasets.Value("string"),
            "primary_landmark_latitude": datasets.Value("string"),
            "primary_landmark_longitude": datasets.Value("string"),
            "primary_landmark_confidence": datasets.Value("string"),
        },
        # One-to-many documents below: each photo maps to a variable-length
        # list of rows grouped by "photo_id".
        "keywords": [
            {
                "keyword": datasets.Value("string"),
                "ai_service_1_confidence": datasets.Value("string"),
                "ai_service_2_confidence": datasets.Value("string"),
                "suggested_by_user": datasets.Value("bool"),
            },
        ],
        "collections": [
            {
                "collection_id": datasets.Value("string"),
                "collection_title": datasets.Value("string"),
                "photo_collected_at": datasets.Value("string"),
            },
        ],
        "conversions": [
            {
                "converted_at": datasets.Value("string"),
                "conversion_type": datasets.Value("string"),
                "keyword": datasets.Value("string"),
                "anonymous_user_id": datasets.Value("string"),
                "conversion_country": datasets.Value("string"),
            },
        ],
        "colors": [
            {
                "hex": datasets.Value("string"),
                "red": datasets.Value("uint8"),
                "green": datasets.Value("uint8"),
                "blue": datasets.Value("uint8"),
                "keyword": datasets.Value("string"),
                "ai_coverage": datasets.Value("float32"),
                "ai_score": datasets.Value("float32"),
            },
        ],
    },
)
def df_withprefix(df, prefix, exclude=None):
    """Project *df* onto the columns whose names start with *prefix*.

    Columns whose name contains the substring *exclude* (when given) are
    dropped, "photo_id" is always kept so the result can be joined back,
    and *prefix* is stripped from the selected column names.
    """
    selected = []
    for name in df.columns:
        if not name.startswith(prefix):
            continue
        if exclude is not None and exclude in name:
            continue
        selected.append(name)
    # Always carry the join key along, even when it lacks the prefix.
    if "photo_id" not in selected:
        selected.append("photo_id")
    return df[selected].rename(columns=lambda name: name.removeprefix(prefix))
class Unsplash(datasets.GeneratorBasedBuilder):
    """Builder for the Unsplash Lite dataset (photos plus nested metadata)."""

    # Examples carry variable-length nested lists (keywords, conversions,
    # collections, colors), so keep writer batches small to bound memory.
    DEFAULT_WRITER_BATCH_SIZE = 100

    def _info(self):
        """Return the dataset metadata assembled from the module constants."""
        return datasets.DatasetInfo(
            features=_FEATURES,
            supervised_keys=None,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            version=_VERSION,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive, load all TSV documents into pandas
        dataframes, and expose them through a single TRAIN split.

        Args:
            dl_manager: `datasets.DownloadManager` used to fetch `_URL`.

        Returns:
            A one-element list with a TRAIN `SplitGenerator` whose
            `gen_kwargs` carry the prepared dataframes.
        """
        archive_path = Path(dl_manager.download_and_extract(_URL))

        dataframes = {}
        for doc in _TSV:
            # Each document type may be sharded across several "<doc>.tsv*"
            # files; read every shard and concatenate into one frame.
            frames = [
                pd.read_csv(filename, sep="\t", header=0)
                for filename in archive_path.glob(f"{doc}.tsv*")
            ]
            concat_frames = pd.concat(frames, axis=0, ignore_index=True)
            if doc != "photos":
                dataframes[doc] = concat_frames
            else:
                # Split the wide "photos" table into the nested sub-documents
                # "photo", "photographer", "exif", "location", "stats", "ai".
                dataframes["photo"] = df_withprefix(concat_frames, "photo_", "location")
                # "blur_hash" carries no "photo_" prefix, so copy it over manually.
                dataframes["photo"]["blur_hash"] = concat_frames["blur_hash"]
                dataframes["photographer"] = df_withprefix(concat_frames, "photographer_")
                dataframes["exif"] = df_withprefix(concat_frames, "exif_")
                dataframes["location"] = df_withprefix(concat_frames, "photo_location_")
                dataframes["stats"] = df_withprefix(concat_frames, "stats_")
                dataframes["ai"] = df_withprefix(concat_frames, "ai_")

        # The TSVs encode booleans as "t"/"f"; map them to real bools.
        dataframes["photo"]["featured"] = dataframes["photo"]["featured"].map({"t": True, "f": False})
        dataframes["keywords"]["suggested_by_user"] = dataframes["keywords"]["suggested_by_user"].map({"t": True, "f": False})

        # Cast columns to the dtypes declared in _FEATURES.
        for doc in dataframes:
            if doc in _TSV:
                # One-to-many documents wrap their feature spec in a
                # one-element list; unwrap it first.
                features = _FEATURES[doc][0]
            else:
                features = _FEATURES[doc]
            # BUG FIX: `DataFrame.astype` returns a NEW dataframe — the
            # original discarded the result, so the declared casts were
            # silently dropped. Assign it back.
            # NOTE(review): columns containing NaN may now raise on integer
            # casts that the former no-op hid — verify against the real data.
            dataframes[doc] = dataframes[doc].astype({
                key: features[key].dtype
                for key in features.keys()
            })

        # Group the one-to-many documents by "photo_id" so examples can be
        # assembled with direct lookups ("photos" itself stays row-indexed).
        for key in _TSV[:-1]:
            dataframes[key] = dataframes[key].groupby("photo_id")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataframes": dataframes},
            ),
        ]

    def _generate_examples(self, dataframes):
        """Yield `(index, example)` pairs, one example per photo.

        The sub-dataframes built in `_split_generators` all share the row
        order of the "photo" frame (concat used `ignore_index=True`), so
        positional `iloc[index]` lines the records up; the grouped
        one-to-many documents are fetched by photo id instead.
        """
        photo_id_frames = {}
        for index, row in dataframes["photo"].iterrows():
            photo_id = row["id"]
            photographer = dataframes["photographer"].iloc[index]
            exif = dataframes["exif"].iloc[index]
            location = dataframes["location"].iloc[index]
            stats = dataframes["stats"].iloc[index]
            ai = dataframes["ai"].iloc[index]
            for key in _TSV[:-1]:
                # BUG FIX: narrowed the former bare `except:` — `get_group`
                # raises KeyError for photos with no rows in this document;
                # a bare except would also swallow KeyboardInterrupt and
                # SystemExit and hide genuine bugs.
                try:
                    photo_id_frames[key] = dataframes[key].get_group(photo_id)
                except KeyError:
                    # No rows for this photo: fall back to an empty frame so
                    # the list comprehensions below yield empty lists.
                    photo_id_frames[key] = pd.DataFrame()
            data = {
                "photo": {
                    "id": photo_id,
                    "url": row["url"],
                    "image_url": row["image_url"],
                    "submitted_at": row["submitted_at"],
                    "featured": row["featured"],
                    "width": row["width"],
                    "height": row["height"],
                    "aspect_ratio": row["aspect_ratio"],
                    "description": row["description"],
                    "blur_hash": row["blur_hash"],
                },
                "photographer": {
                    "username": photographer["username"],
                    "first_name": photographer["first_name"],
                    "last_name": photographer["last_name"],
                },
                "exif": {
                    "camera_make": exif["camera_make"],
                    "camera_model": exif["camera_model"],
                    "iso": exif["iso"],
                    "aperture_value": exif["aperture_value"],
                    "focal_length": exif["focal_length"],
                    "exposure_time": exif["exposure_time"],
                },
                "location": {
                    "name": location["name"],
                    "latitude": location["latitude"],
                    "longitude": location["longitude"],
                    "country": location["country"],
                    "city": location["city"],
                },
                "stats": {
                    "views": stats["views"],
                    "downloads": stats["downloads"],
                },
                "ai": {
                    "description": ai["description"],
                    "primary_landmark_name": ai["primary_landmark_name"],
                    "primary_landmark_latitude": ai["primary_landmark_latitude"],
                    "primary_landmark_longitude": ai["primary_landmark_longitude"],
                    "primary_landmark_confidence": ai["primary_landmark_confidence"],
                },
                "keywords": [
                    {
                        "keyword": keyword["keyword"],
                        "ai_service_1_confidence": keyword["ai_service_1_confidence"],
                        "ai_service_2_confidence": keyword["ai_service_2_confidence"],
                        "suggested_by_user": keyword["suggested_by_user"],
                    }
                    for _, keyword in photo_id_frames["keywords"].iterrows()
                ],
                "collections": [
                    {
                        "collection_id": collection["collection_id"],
                        "collection_title": str(collection["collection_title"]),
                        "photo_collected_at": collection["photo_collected_at"],
                    }
                    for _, collection in photo_id_frames["collections"].iterrows()
                ],
                "conversions": [
                    {
                        "converted_at": conversion["converted_at"],
                        "conversion_type": conversion["conversion_type"],
                        "keyword": conversion["keyword"],
                        "anonymous_user_id": conversion["anonymous_user_id"],
                        "conversion_country": str(conversion["conversion_country"]),
                    }
                    for _, conversion in photo_id_frames["conversions"].iterrows()
                ],
                "colors": [
                    {
                        "hex": color["hex"],
                        "red": color["red"],
                        "green": color["green"],
                        "blue": color["blue"],
                        "keyword": color["keyword"],
                        "ai_coverage": color["ai_coverage"],
                        "ai_score": color["ai_score"],
                    }
                    for _, color in photo_id_frames["colors"].iterrows()
                ],
            }
            yield index, data