# Modified code from https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/main/scripts/download_jiggins_subset.py
# For downloading Jiggins images from any of the master CSV files.
# Generates a checksum file for all downloaded images and
# logs each image download attempt in a JSON file.
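#
# Example usage (the CSV filename here is hypothetical; any master CSV with the
# expected columns will work):
#   python download_jiggins_subset.py --csv Jiggins_Zenodo_Master.csv --output jiggins_images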

import argparse
import json
import os
import shutil

import pandas as pd
import requests
from tqdm import tqdm

from checksum import get_checksums


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--csv", required=True, help="Path to CSV file with urls.")
    parser.add_argument("--output", required=True, help="Main directory to download images into.")
    return parser.parse_args()


def update_log(log_data, index, image, url, response_code):
    # Record the status of one download attempt, keyed by CSV row index.
    log_data[index] = {
        "Image": image,
        "zenodo_link": url,
        "Response_status": response_code,
    }
    return log_data
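

# The master CSVs are expected to provide at least the columns used below:
# X, Image_name, Taxonomic_Name, and zenodo_link.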


def download_images(csv_path, image_folder, log_filepath):
    # Load the CSV of image records.
    jiggins_data = pd.read_csv(csv_path)
    log_data = {}

    for i in tqdm(range(len(jiggins_data))):
        species = jiggins_data["Taxonomic_Name"][i]
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]

        # Download the image from its URL if not already downloaded.
        if not os.path.exists(f"{image_folder}/{species}/{image_name}"):
            # Get image from URL.
            url = jiggins_data["zenodo_link"][i]
            response = requests.get(url, stream=True)

            # Log the status of the request.
            log_data = update_log(log_data,
                                  index=i,
                                  image=species + "/" + image_name,
                                  url=url,
                                  response_code=response.status_code)

            # Create the species-appropriate folder if necessary.
            os.makedirs(f"{image_folder}/{species}", exist_ok=True)

            # Save the image to disk.
            if response.status_code == 200:
                with open(f"{image_folder}/{species}/{image_name}", "wb") as out_file:
                    shutil.copyfileobj(response.raw, out_file)
            del response

    # Write the accumulated download log as JSON.
    with open(log_filepath, "w") as log_file:
        json.dump(log_data, log_file, indent=4)
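

# Illustrative result for one CSV row (all values hypothetical): the image is
# saved to <output>/<Taxonomic_Name>/<X>_<Image_name>, and the log gains an
# entry keyed by row index, e.g.
#   "0": {"Image": "Heliconius_melpomene/1_CAM000001.jpg",
#         "zenodo_link": "https://zenodo.org/...", "Response_status": 200}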


def main():
    # Get arguments from the command line.
    args = parse_args()
    csv_path = args.csv         # path to the CSV with urls to download images from
    image_folder = args.output  # folder the dataset will be downloaded into

    # Log file location (same base name as the CSV).
    log_filepath = csv_path.split(".")[0] + "_log.json"

    # Download images from the urls, logging each attempt.
    download_images(csv_path, image_folder, log_filepath)

    # Generate checksums and save the CSV to the same folder as the CSV used for download.
    checksum_path = csv_path.split(".")[0] + "_checksums.csv"
    get_checksums(image_folder, checksum_path)

    print(f"Images downloaded from {csv_path} to {image_folder}.")
    print(f"Checksums recorded in {checksum_path} and download log is in {log_filepath}.")


if __name__ == "__main__":
    main()