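"""Convert OAS paired-sequence CSV data units into partitioned Parquet archives on S3."""
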
import boto3
import io
import os
import pandas as pd
import re
import shutil
import tarfile
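
# Per-species data-unit list files are read from, and all intermediate
# files written to, the current working directory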
data_dir = os.getcwd()
species_list = ["rat_SD", "mouse_BALB_c", "mouse_C57BL_6", "human"]
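# Source CSVs are read from, and Parquet archives written back to, this bucket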
S3_BUCKET = "aws-hcls-ml"
S3_SRC_PREFIX = "oas-paired-sequence-data/raw"
S3_DEST_PREFIX = "oas-paired-sequence-data/parquet"
s3 = boto3.client("s3")
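
# For each species: download every data unit, filter columns, merge,
# write a partitioned Parquet dataset, and upload it as a tar.gz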
for species in species_list:
    print(f"Downloading {species} files")
    list_of_df = []
    species_url_file = os.path.join(data_dir, species + "_oas_data_units.txt")
    with open(species_url_file, "r") as f:
        for csv_file in f:
            # Strip the trailing newline before parsing; skip any blank lines
            csv_file = csv_file.strip()
            if not csv_file:
                continue
            print(csv_file)
            filename = os.path.basename(csv_file)
            run_id = re.search(r"^(.*)_[Pp]aired", filename).group(1)
            s3_key = f"{S3_SRC_PREFIX}/{species}/{csv_file}"
            obj = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)
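            # The first line of each gzipped CSV holds run metadata, so the
            # real column names are on the second line (header=1)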
            run_data = pd.read_csv(
                io.BytesIO(obj["Body"].read()),
                header=1,
                compression="gzip",
                on_bad_lines="warn",
                low_memory=False,
            )
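            # Keep only the heavy- and light-chain sequence alignments and CDR regions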
            run_data = run_data[
                [
                    "sequence_alignment_aa_heavy",
                    "cdr1_aa_heavy",
                    "cdr2_aa_heavy",
                    "cdr3_aa_heavy",
                    "sequence_alignment_aa_light",
                    "cdr1_aa_light",
                    "cdr2_aa_light",
                    "cdr3_aa_light",
                ]
            ]
            run_data = run_data.dropna()
            run_data.insert(0, "data_unit", run_id)
            list_of_df.append(run_data)
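    # Merge every data unit for this species into a single table, partitioned by data unit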
    species_df = pd.concat(list_of_df, ignore_index=True)
    print(f"{species} output summary:")
    print(species_df.head())
    print(species_df.shape)
    os.makedirs(species, exist_ok=True)
    species_df.to_parquet(species, partition_cols=["data_unit"])
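    # Package the partitioned dataset as a tar.gz, upload it, then clean up locally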
    zip_name = species + ".tar.gz"
    print(f"Creating {zip_name}")
    with tarfile.open(zip_name, "w:gz") as tf:
        # arcname="." archives the directory contents without empty member names
        tf.add(species, arcname=".")
    print(f"Uploading {zip_name} to s3://{S3_BUCKET}/{S3_DEST_PREFIX}")
    s3.upload_file(zip_name, S3_BUCKET, f"{S3_DEST_PREFIX}/{zip_name}")
    print(f"Removing {species}")
    shutil.rmtree(species)
    print(f"Removing {zip_name}")
    os.remove(zip_name)