import os
import re

import pandas as pd
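
# Download OAS paired-sequence CSV files (one gzipped file per sequencing
# run) and convert each run into a parquet shard under a per-species
# directory. "data_units/<species>.txt" is expected to list one
# "<run>_paired.csv.gz" filename per line.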
data_dir = os.path.join(os.getcwd(), "data_units")
species_list = ["rat_SD", "mouse_BALB_c", "mouse_C57BL_6", "human"]
BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/raw/"
for species in species_list:
    print(f"Downloading {species} files")
    species_url_file = os.path.join(data_dir, species + ".txt")
    with open(species_url_file, "r") as f:
        i = 0
        os.makedirs(species, exist_ok=True)
        for csv_file in f.readlines():
            # Strip the trailing newline so it doesn't leak into the URL.
            csv_file = csv_file.strip()
            print(csv_file)
            filename = os.path.basename(csv_file)
            # The run ID is everything before "_paired" in the filename.
            run_id = str(re.search(r"^(.*)_[Pp]aired", filename)[1])
            url = BASE_URL + species + "/" + csv_file
            run_data = pd.read_csv(
                url,
                header=1,  # column names are on the second line of the file
                compression="gzip",
                on_bad_lines="warn",
                low_memory=False,
            )
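            # Keep only the amino-acid sequence alignments and the three CDR
            # loops for the heavy and light chains.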
            run_data = run_data[
                [
                    "sequence_alignment_aa_heavy",
                    "cdr1_aa_heavy",
                    "cdr2_aa_heavy",
                    "cdr3_aa_heavy",
                    "sequence_alignment_aa_light",
                    "cdr1_aa_light",
                    "cdr2_aa_light",
                    "cdr3_aa_light",
                ]
            ]
            # Drop rows with missing regions, tag each row with its source
            # run, and write one parquet shard per run.
            run_data = run_data.dropna()
            run_data.insert(0, "data_unit", run_id)
            print(run_data.shape)
            output_path = os.path.join(species, "train_" + str(i) + ".parquet")
            run_data.to_parquet(output_path)
            i += 1
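
# Quick sanity check after the run (a sketch, not part of the pipeline:
# pandas needs pyarrow or fastparquet installed for parquet I/O, and
# "human/train_0.parquet" is just an assumed example path):
#
#   df = pd.read_parquet(os.path.join("human", "train_0.parquet"))
#   print(df.columns.tolist())
#   print(df.shape)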