# Built on Michelle's download script: https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/977a934e1eef18f6b6152da430ac83ba6f7bd30f/download_jiggins_subset.py
# with a modified version of David's redo loop: https://github.com/Imageomics/data-fwg/blob/anomaly-data-challenge/HDR-anomaly-data-challenge/notebooks/download_images.ipynb
# plus expanded logging and file checks, and a checksum calculation over all downloaded images at the end.
#
# Script to download Jiggins images from any of the master CSV files.
# Generates a checksum file for all downloaded images (<master filename>_checksums.csv).
# Logs image downloads and failures in JSON files (<master filename>_log.json & <master filename>_error_log.json).
# Records record numbers and response codes as strings, not int64.
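#
# Example usage (paths and filenames are illustrative):
#   python <this script> --csv <master filename>.csv --output <image folder>
# The checksum CSV and both log files are written alongside the source CSV.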
import requests
import shutil
import json
import pandas as pd
from checksum import get_checksums
from tqdm import tqdm
import os
import sys
import time
import argparse
EXPECTED_COLS = ["CAMID",
"X",
"Image_name",
"file_url",
"Taxonomic_Name",
"record_number",
"Dataset"
]
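# HTTP statuses treated as transient (rate limiting or server-side errors); these trigger a retry.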
REDO_CODE_LIST = [429, 500, 502, 503, 504]
# If the download was interrupted, set this to the row index where it stopped; rows at or below
# this index that already exist on disk are skipped silently instead of being reported as duplicates.
STARTING_INDEX = 0
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--csv", required=True, help="Path to CSV file with urls.", nargs="?")
parser.add_argument("--output", required=True, help="Main directory to download images into.", nargs="?")
return parser.parse_args()
def log_response(log_data, index, image, url, record_number, dataset, cam_id, response_code):
# log status
log_entry = {}
log_entry["Image"] = image
log_entry["file_url"] = url
log_entry["record_number"] = str(record_number) #int64 has problems sometimes
log_entry["dataset"] = dataset
log_entry["CAMID"] = cam_id
log_entry["Response_status"] = str(response_code)
log_data[index] = log_entry
return log_data
def update_log(log, index, filepath):
# save logs
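    # Note: entries are appended one JSON object at a time, so the log file is a stream of
    # pretty-printed objects rather than a single JSON document.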
with open(filepath, "a") as log_file:
json.dump(log[index], log_file, indent = 4)
log_file.write("\n")
def download_images(jiggins_data, image_folder, log_filepath, error_log_filepath):
log_data = {}
log_errors = {}
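    # Both logs are keyed by the CSV row index (i), so entries can be matched back to the source CSV.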
    for i in tqdm(range(len(jiggins_data))):
        # species will really be <Genus> <species> ssp. <subspecies>, where a subspecies is indicated
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]
record_number = jiggins_data["record_number"][i]
# download the image from url if not already downloaded
# Will attempt to download everything in CSV (image_name is unique: <X>_<Image_name>), unless download restarted
if os.path.exists(f"{image_folder}/{species}/{image_name}") != True:
#get image from url
url = jiggins_data["file_url"][i]
dataset = jiggins_data["Dataset"][i]
cam_id = jiggins_data["CAMID"][i]
#download the image
redo = True
max_redos = 2
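            # Retry loop: attempt the request up to max_redos times, pausing between
            # attempts when the server returns a transient status from REDO_CODE_LIST.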
while redo and max_redos > 0:
                try:
                    response = requests.get(url, stream=True)
                except Exception as e:
                    # Request failed before any response arrived (e.g., connection error):
                    # retry, or log the failure once the retries are used up.
                    redo = True
                    max_redos -= 1
                    if max_redos <= 0:
                        log_errors = log_response(log_errors,
                                                  index = i,
                                                  image = species + "/" + image_name,
                                                  url = url,
                                                  record_number = record_number,
                                                  dataset = dataset,
                                                  cam_id = cam_id,
                                                  response_code = str(e))
                        update_log(log = log_errors, index = i, filepath = error_log_filepath)
                    continue  # no response object to inspect, so skip the status checks below
if response.status_code == 200:
redo = False
# log status
log_data = log_response(log_data,
index = i,
image = species + "/" + image_name,
url = url,
record_number = record_number,
dataset = dataset,
cam_id = cam_id,
response_code = response.status_code
)
update_log(log = log_data, index = i, filepath = log_filepath)
#create the species appropriate folder if necessary
if os.path.exists(f"{image_folder}/{species}") != True:
os.makedirs(f"{image_folder}/{species}", exist_ok=False)
# save image to appropriate folder
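                    # stream=True plus copyfileobj writes the response body to disk in chunks
                    # instead of loading the whole image into memory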
with open(f"{image_folder}/{species}/{image_name}", "wb") as out_file:
shutil.copyfileobj(response.raw, out_file)
# check for too many requests
elif response.status_code in REDO_CODE_LIST:
redo = True
max_redos -= 1
if max_redos <= 0:
log_errors = log_response(log_errors,
index = i,
image = species + "/" + image_name,
url = url,
record_number = record_number,
dataset = dataset,
cam_id = cam_id,
response_code = response.status_code)
update_log(log = log_errors, index = i, filepath = error_log_filepath)
else:
time.sleep(1)
                else: # other failure, e.g. 404
redo = False
log_errors = log_response(log_errors,
index = i,
image = species + "/" + image_name,
url = url,
record_number = record_number,
dataset = dataset,
cam_id = cam_id,
response_code = response.status_code)
update_log(log = log_errors, index = i, filepath = error_log_filepath)
del response
else:
if i > STARTING_INDEX:
# No need to print if download is restarted due to interruption (set STARTING_INDEX accordingly).
print(f"duplicate image: {jiggins_data['X']}, {jiggins_data['Image_name']}, from record {record_number}")
return
def main():
#get arguments from commandline
args = parse_args()
csv_path = args.csv #path to our csv with urls to download images from
image_folder = args.output #folder where dataset will be downloaded to
# log file location (folder of source CSV)
    log_filepath = os.path.splitext(csv_path)[0] + "_log.json"
    error_log_filepath = os.path.splitext(csv_path)[0] + "_error_log.json"
#load csv
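    # low_memory=False reads the whole file at once so pandas infers a single dtype per column
    # (avoids mixed-type chunk warnings on large master CSVs)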
jiggins_data = pd.read_csv(csv_path, low_memory = False)
# Check for required columns
missing_cols = []
for col in EXPECTED_COLS:
if col not in list(jiggins_data.columns):
missing_cols.append(col)
if len(missing_cols) > 0:
sys.exit(f"The CSV is missing column(s): {missing_cols}")
    # download images from urls
download_images(jiggins_data, image_folder, log_filepath, error_log_filepath)
# generate checksums and save CSV to same folder as CSV used for download
    checksum_path = os.path.splitext(csv_path)[0] + "_checksums.csv"
get_checksums(image_folder, checksum_path)
print(f"Images downloaded from {csv_path} to {image_folder}.")
print(f"Checksums recorded in {checksum_path} and download logs are in {log_filepath} and {error_log_filepath}.")
return
if __name__ == "__main__":
main()