#### This code shows how I process the data locally and convert the images to NumPy arrays. After processing, I upload the final CSV to GitHub and get its URL.

```python
import csv
import os
import numpy as np
from PIL import Image
import pandas as pd
# --- Initial Setup ---
initial_csv_file_path = 'https://github.com/LeoZhangzaolin/photos/blob/main/Graptolite%20specimens.csv'
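# NOTE: open() cannot read an http(s) URL, and the path above points to the GitHub
# web page for the file. Assuming the CSV should be fetched from GitHub, one option
# (hypothetical raw-content URL derived from the blob URL above) would be:
#
#     import urllib.request
#     raw_url = ('https://raw.githubusercontent.com/LeoZhangzaolin/photos/'
#                'main/Graptolite%20specimens.csv')
#     initial_csv_file_path, _ = urllib.request.urlretrieve(raw_url, 'Graptolite_specimens.csv')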
columns_to_delete = [
    "species ID", "Phylum", "Class", "Order", "revised species name",
    "total number of specimens", "specimens Serial No", "显微镜照片数量", "SLR photo No",
    "相机照片数量", "跑数据照片总数", "备注", "age from", "age to", "collection No", "Microscrope photo No"
]
# --- Read and Process CSV Data ---
with open(initial_csv_file_path, newline='', encoding='utf-8') as file:
    reader = csv.reader(file)
    data = list(reader)

header = data[0]

# Find indices for columns to merge (None if the column is absent)
family_index = header.index('Family') if 'Family' in header else None
subfamily_index = header.index('Subfamily') if 'Subfamily' in header else None
locality_index = header.index('Locality') if 'Locality' in header else None
longitude_index = header.index('Longitude') if 'Longitude' in header else None
latitude_index = header.index('Latitude') if 'Latitude' in header else None
horizon_index = header.index('Horizon') if 'Horizon' in header else None
# Process rows: merge the Family/Subfamily and Locality-related columns
for row in data[1:]:
    if family_index is not None and subfamily_index is not None:
        family = row[family_index]
        subfamily = row[subfamily_index] if row[subfamily_index] else 'no subfamily'
        row[family_index] = f"{family} ({subfamily})"
    # Check each index against None explicitly: all() on the raw indices would
    # wrongly treat a column sitting at index 0 as missing
    if locality_index is not None and all(i is not None for i in (longitude_index, latitude_index, horizon_index)):
        locality = row[locality_index]
        longitude = row[longitude_index]
        latitude = row[latitude_index]
        horizon = row[horizon_index]
        row[locality_index] = f"{locality} ({longitude}, {latitude}, {horizon})"
# Update header and remove unneeded columns
if family_index is not None:
    header[family_index] = 'Family (Subfamily)'
if locality_index is not None:
    header[locality_index] = 'Locality (Longitude, Latitude, Horizon)'
indices_to_delete = [header.index(column) for column in columns_to_delete if column in header]
# Also drop the columns that were merged above, skipping any that are absent (None)
merged_indices = [i for i in (subfamily_index, longitude_index, latitude_index, horizon_index) if i is not None]
indices_to_delete.extend(merged_indices)
# Delete from the highest index down so earlier deletions don't shift later ones
indices_to_delete = sorted(set(indices_to_delete), reverse=True)
header = [col for i, col in enumerate(header) if i not in indices_to_delete]
for row in data[1:]:
    for index in indices_to_delete:
        del row[index]
# Convert processed data into a DataFrame
df = pd.DataFrame(data[1:], columns=header)
# --- Image Processing ---
# Image directories
image_dir_paths = ['/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 1',
                   '/Users/leozhangzaolin/Desktop/project 1/graptolite specimens with scale 2']

# Normalize file extensions (e.g. '.JPG' -> '.jpg') in the image directories
def normalize_file_extensions(dir_path):
    for filename in os.listdir(dir_path):
        if filename.lower().endswith('.jpg') and not filename.endswith('.jpg'):
            base, ext = os.path.splitext(filename)
            new_filename = base + '.jpg'
            os.rename(os.path.join(dir_path, filename), os.path.join(dir_path, new_filename))

for path in image_dir_paths:
    normalize_file_extensions(path)
# Function to process an image and return it as a NumPy array
def process_image_array(image_name, max_size=(1024, 1024)):
    # The CSV stores the base file name; the files on disk carry an '_S' suffix
    image_base_name = os.path.splitext(image_name)[0]
    image_paths = [os.path.join(dir_path, image_base_name + suffix)
                   for dir_path in image_dir_paths
                   for suffix in ['_S.jpg', '_S.JPG']]
    image_path = next((path for path in image_paths if os.path.exists(path)), None)
    if image_path is None:
        return None
    with Image.open(image_path) as img:
        # Image.Resampling requires Pillow >= 9.1; older versions use Image.LANCZOS
        img.thumbnail(max_size, Image.Resampling.LANCZOS)
        return np.array(img)
# Apply the function to embed image arrays in the 'image file name' column
df['image file name'] = df['image file name'].apply(process_image_array)
df = df.dropna(subset=['image file name'])
# Since arrays can't be directly saved in CSV, convert them to a string representation
df['image file name'] = df['image file name'].apply(lambda x: np.array2string(x))
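# Note: np.array2string abbreviates arrays above ~1000 elements by default, so the strings
# produced above are summaries rather than recoverable pixel data. If lossless round-tripping
# is needed, one alternative (not part of the original pipeline) is to serialize each array
# with np.save and base64-encode the bytes, e.g.:
#
#     import base64, io
#     def array_to_b64(arr):
#         buf = io.BytesIO()
#         np.save(buf, arr)
#         return base64.b64encode(buf.getvalue()).decode('ascii')
#
# and apply array_to_b64 in place of np.array2string above.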
# Rename the 'image file name' column to 'image'
df.rename(columns={'image file name': 'image'}, inplace=True)
# --- Save the Final DataFrame to a CSV File ---
final_csv_path = '/Users/leozhangzaolin/Desktop/Final_GS_with_Images.csv'
df.to_csv(final_csv_path, index=False)
```
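
Once the final CSV has been uploaded to GitHub, it can be read back straight from its raw-content URL. A minimal sketch, assuming a hypothetical `raw.githubusercontent.com` path for the uploaded `Final_GS_with_Images.csv`:

```python
import pandas as pd

# Hypothetical raw-content URL for the uploaded file (replace with the actual one).
final_csv_url = ('https://raw.githubusercontent.com/LeoZhangzaolin/photos/'
                 'main/Final_GS_with_Images.csv')

# pandas reads CSVs directly from URLs, so no manual download step is needed.
df_final = pd.read_csv(final_csv_url)
print(df_final.shape)
print(df_final.columns.tolist())
```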