# DataCleaning and ImageTransfering.py
#### delete unneeded columns
import csv

file_path = '/Users/leozhangzaolin/Downloads/Graptolite specimens.csv'

with open(file_path, newline='', encoding='utf-8') as file:
    reader = csv.reader(file)
    data = list(reader)

# Columns to drop; the names (including the Chinese ones) must match the CSV headers exactly.
columns_to_delete = [
    "species ID", "Phylum", "Class", "Order", "revised species name",
    "total number of specimens", "specimens Serial No", "显微镜照片数量", "SLR photo No",
    "相机照片数量", "跑数据照片总数", "备注", "age from", "age to", "collection No", "Microscrope photo No"
]

header = data[0]
indices_to_delete = [header.index(column) for column in columns_to_delete if column in header]
indices_to_delete.sort(reverse=True)  # delete right-to-left so earlier indices stay valid

for row in data:
    for index in indices_to_delete:
        del row[index]

new_file_path = '/Users/leozhangzaolin/desktop/New_GS.csv'
with open(new_file_path, mode='w', newline='', encoding='utf-8') as file:
    writer = csv.writer(file)
    writer.writerows(data)
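
# Optional sanity check (a sketch, not part of the original pipeline):
# print the surviving header to confirm the unneeded columns were dropped.
print(data[0])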
#### merge columns
file_path1 = '/Users/leozhangzaolin/desktop/New_GS.csv'

with open(file_path1, newline='', encoding='utf-8') as file1:
    reader1 = csv.reader(file1)
    data1 = list(reader1)

header1 = data1[0]
family_index1 = header1.index('Family') if 'Family' in header1 else None
subfamily_index1 = header1.index('Subfamily') if 'Subfamily' in header1 else None
locality_index1 = header1.index('Locality') if 'Locality' in header1 else None
longitude_index1 = header1.index('Longitude') if 'Longitude' in header1 else None
latitude_index1 = header1.index('Latitude') if 'Latitude' in header1 else None
horizon_index1 = header1.index('Horizon') if 'Horizon' in header1 else None

# Fold Subfamily into Family, and Longitude/Latitude/Horizon into Locality.
for row1 in data1[1:]:
    family1 = row1[family_index1] if family_index1 is not None else ''
    subfamily1 = row1[subfamily_index1] if subfamily_index1 is not None else 'no subfamily'
    row1[family_index1] = f"{family1} ({subfamily1})" if subfamily_index1 is not None else family1
    locality1 = row1[locality_index1] if locality_index1 is not None else ''
    longitude1 = row1[longitude_index1] if longitude_index1 is not None else ''
    latitude1 = row1[latitude_index1] if latitude_index1 is not None else ''
    horizon1 = row1[horizon_index1] if horizon_index1 is not None else ''
    row1[locality_index1] = f"{locality1} ({longitude1}, {latitude1}, {horizon1})"

header1[family_index1] = 'Family (Subfamily)'
header1[locality_index1] = 'Locality (Longitude, Latitude, Horizon)'

# Drop the merged-in columns; filter out missing columns before sorting
# and delete right-to-left so the remaining indices stay valid.
indices_to_delete1 = sorted(
    [i for i in (subfamily_index1, longitude_index1, latitude_index1, horizon_index1) if i is not None],
    reverse=True)
for index1 in indices_to_delete1:
    for row1 in data1:
        del row1[index1]

new_file_path1 = '/Users/leozhangzaolin/desktop/New_GS_1.csv'
with open(new_file_path1, mode='w', newline='', encoding='utf-8') as file1:
    writer1 = csv.writer(file1)
    writer1.writerows(data1)
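
# Optional sanity check (a sketch): print the merged header and the first data row
# to confirm the 'Family (Subfamily)' and 'Locality (...)' columns look as expected.
print(data1[0])
if len(data1) > 1:
    print(data1[1])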
#### read images and convert them to .npy arrays
import os
import numpy as np
from PIL import Image
import pandas as pd
# Paths
csv_file_path = '/Users/leozhangzaolin/desktop/New_GS_1.csv'
image_dir_paths = ['/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 1',
'/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 2']
npy_save_dir = '/Users/leozhangzaolin/Desktop/arrays'
# Normalize file extensions in the image directories (e.g. rename '.JPG' to '.jpg')
def normalize_file_extensions(dir_path):
    for filename in os.listdir(dir_path):
        # Catch any casing of '.jpg' that is not already all lowercase
        if filename.lower().endswith('.jpg') and not filename.endswith('.jpg'):
            base, ext = os.path.splitext(filename)
            new_filename = base + '.jpg'
            os.rename(os.path.join(dir_path, filename),
                      os.path.join(dir_path, new_filename))

for path in image_dir_paths:
    normalize_file_extensions(path)
# Load the CSV file
df = pd.read_csv(csv_file_path)
# Function to process and resize the image, saving it as a .npy array
def process_image(image_name, save_dir, max_size=(1024, 1024)):
    image_base_name = os.path.splitext(image_name)[0]
    image_paths = [os.path.join(dir_path, image_base_name + suffix)
                   for dir_path in image_dir_paths
                   for suffix in ['_S.jpg', '_S.JPG']]
    image_path = next((path for path in image_paths if os.path.exists(path)), None)
    if image_path is None:
        return None
    with Image.open(image_path) as img:
        # Resize the image using the LANCZOS filter (replacement for ANTIALIAS)
        img.thumbnail(max_size, Image.Resampling.LANCZOS)
        image_array = np.array(img)
    npy_filename = image_base_name + '_S.npy'
    npy_path = os.path.join(save_dir, npy_filename)
    np.save(npy_path, image_array)
    return npy_path
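
# Example usage (a sketch): a single call returns the path of the saved .npy file,
# or None when no matching '_S' image exists in either directory.
# 'specimen_001.jpg' is a hypothetical file name, not one from the dataset.
# process_image('specimen_001.jpg', npy_save_dir)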
# Create a directory to save the numpy arrays
os.makedirs(npy_save_dir, exist_ok=True)
# Process each image and update the 'image file name' column
df['image file name'] = df['image file name'].apply(lambda x: process_image(x, npy_save_dir))
# Remove rows where the image was not found
df = df.dropna(subset=['image file name'])
# Rename the 'image file name' column to 'image'
df.rename(columns={'image file name': 'image'}, inplace=True)
# Save the updated DataFrame to a new CSV file
updated_csv_path = '/Users/leozhangzaolin/Desktop/New_GS_2_updated_images.csv'
df.to_csv(updated_csv_path, index=False)
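
# Optional verification (a sketch, assuming the pipeline above ran to completion):
# reload the updated CSV and one saved array to confirm the conversion worked.
check_df = pd.read_csv(updated_csv_path)
if len(check_df) > 0:
    sample_array = np.load(check_df['image'].iloc[0])
    print(f"{len(check_df)} rows; first array shape: {sample_array.shape}")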