egrace479 committed on
Commit
338d721
1 Parent(s): d8d92b6

Add download and checksum scripts.

Browse files
scripts/checksum.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import os
3
+ import hashlib
4
+ import csv
5
+ from tqdm import tqdm
6
+
7
def md5_checksum(file_path):
    """Return the hexadecimal MD5 digest of the file at *file_path*.

    The file is consumed in 4 KiB chunks so arbitrarily large files can
    be hashed without loading them entirely into memory.
    """
    digest = hashlib.md5()
    with open(file_path, "rb") as stream:
        while True:
            block = stream.read(4096)
            if not block:
                break
            digest.update(block)
    return digest.hexdigest()
13
+
14
def get_checksums(input_directory, output_filepath):
    """Recursively walk *input_directory* and record an MD5 per file.

    Writes a CSV to *output_filepath* with the columns
    ``filepath, filename, md5``; a tqdm progress bar is shown for each
    directory visited during the walk.
    """
    with open(output_filepath, 'w', newline='') as out:
        csv_out = csv.writer(out)
        csv_out.writerow(["filepath", "filename", "md5"])
        for dirpath, _subdirs, filenames in os.walk(input_directory):
            for filename in tqdm(filenames, total=len(filenames), desc="MD5ing"):
                full_path = os.path.join(dirpath, filename)
                csv_out.writerow([full_path, filename, md5_checksum(full_path)])
    print(f"Checksums written to {output_filepath}")
25
+
26
if __name__ == "__main__":
    # Command-line entry point: checksum every file under a directory tree.
    arg_parser = argparse.ArgumentParser(
        description="Generate MD5 checksums for files in a directory"
    )
    arg_parser.add_argument("--input-directory", required=True,
                            help="Directory to traverse for files")
    arg_parser.add_argument("--output-filepath", required=True,
                            help="Filepath for the output CSV file")
    cli_args = arg_parser.parse_args()
    get_checksums(cli_args.input_directory, cli_args.output_filepath)
32
+
scripts/download_jiggins_subset.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Modified code from https://huggingface.co/datasets/imageomics/Comparison-Subset-Jiggins/blob/main/scripts/download_jiggins_subset.py
2
+ # For downloading Jiggins images from any of the master CSV files
3
+ # Generates a checksum file for all downloaded images
+ # Logs each image download in a JSON file
5
+
6
+ import requests
7
+ import shutil
8
+ import json
9
+
10
+ import pandas as pd
11
+ from checksum import get_checksums
12
+
13
+ from tqdm import tqdm
14
+ import os
15
+ import argparse
16
+
17
+
18
def parse_args():
    """Parse the command-line arguments for the downloader.

    Returns:
        argparse.Namespace with:
            csv: path to the CSV file listing image URLs.
            output: main directory to download images into.
    """
    parser = argparse.ArgumentParser(
        description="Download Jiggins images listed in a CSV of Zenodo URLs."
    )
    # nargs="?" was removed: combined with required=True it allowed
    # "--csv" / "--output" to be given with NO value, silently producing
    # None for a path the rest of the script requires.
    parser.add_argument("--csv", required=True,
                        help="Path to CSV file with urls.")
    parser.add_argument("--output", required=True,
                        help="Main directory to download images into.")

    return parser.parse_args()
24
+
25
+
26
def update_log(log_data, index, image, url, response_code):
    """Record the outcome of one image download attempt.

    Stores a dict describing the attempt (relative image path, source
    URL, and HTTP status code) under *index* in *log_data*, then
    returns the updated *log_data* mapping.
    """
    log_data[index] = {
        "Image": image,
        "zenodo_link": url,
        "Response_status": response_code,
    }

    return log_data
35
+
36
+
37
def download_images(csv_path, image_folder, log_filepath):
    """Download every image listed in *csv_path* into *image_folder*.

    The CSV must provide the columns ``X``, ``Image_name``,
    ``Taxonomic_Name`` and ``zenodo_link``.  Each image is saved to
    ``<image_folder>/<Taxonomic_Name>/<X>_<Image_name>``; files that
    already exist on disk are skipped.  Every attempted download
    (image, URL, HTTP status) is recorded in a JSON log written to
    *log_filepath*.
    """
    # Load the CSV of URLs.
    jiggins_data = pd.read_csv(csv_path)
    log_data = {}

    for i in tqdm(range(0, len(jiggins_data))):
        species = jiggins_data["Taxonomic_Name"][i]
        # str(...) instead of .astype(str): works for plain Python values
        # as well as numpy scalars.
        image_name = str(jiggins_data["X"][i]) + "_" + jiggins_data["Image_name"][i]

        # Download the image from its URL only if not already downloaded.
        if not os.path.exists(f"{image_folder}/{species}/{image_name}"):
            url = jiggins_data["zenodo_link"][i]
            response = requests.get(url, stream=True)
            try:
                # Log the attempt regardless of outcome.
                log_data = update_log(log_data,
                                      index=i,
                                      image=species + "/" + image_name,
                                      url=url,
                                      response_code=response.status_code
                                      )

                # Create the species folder if necessary.  exist_ok=True
                # avoids the crash the old check-then-makedirs(exist_ok=False)
                # pattern raced into when the folder appeared between the
                # check and the call.
                os.makedirs(f"{image_folder}/{species}", exist_ok=True)

                # Save the image only on a successful response.
                if response.status_code == 200:
                    with open(f"{image_folder}/{species}/{image_name}", "wb") as out_file:
                        shutil.copyfileobj(response.raw, out_file)
            finally:
                # Always release the HTTP connection; the original only
                # `del`-eted the response, leaking the connection on
                # non-200 responses.
                response.close()

    # Persist the download log.
    with open(log_filepath, "w") as log_file:
        json.dump(log_data, log_file, indent=4)

    return
74
+
75
def main():
    """Download images listed in a CSV, then checksum and log them."""

    # Get arguments from the command line.
    args = parse_args()
    csv_path = args.csv          # path to the CSV with URLs to download from
    image_folder = args.output   # folder the dataset will be downloaded into

    # Derive sibling output paths from the CSV path.  os.path.splitext
    # replaces csv_path.split(".")[0], which broke for paths containing
    # dots in directory components (e.g. "./data/x.csv" -> "").
    base_path = os.path.splitext(csv_path)[0]

    # Log file location.
    log_filepath = base_path + "_log.json"

    # Download images from the URLs.
    download_images(csv_path, image_folder, log_filepath)

    # Generate checksums and save the CSV next to the CSV used for download.
    checksum_path = base_path + "_checksums.csv"
    get_checksums(image_folder, checksum_path)

    print(f"Images downloaded from {csv_path} to {image_folder}.")
    print(f"Checksums recorded in {checksum_path} and download log is in {log_filepath}.")

    return
96
+
97
# Run the downloader only when executed as a script, not when imported.
if __name__ == "__main__":
    main()