Commit 6aae2cf ("cleanup")
Author: EC2 Default User
Parent(s): 75e52b5

Files changed:
- oas-paired-sequence-data.py (+4 -55)
- src/oas-data-cleaning.py (+4 -24)
oas-paired-sequence-data.py CHANGED

@@ -3,9 +3,8 @@
 # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 # SPDX-License-Identifier: MIT-0
 """Paired sequences from the Observed Antibody Space database"""
-# import csv
-import pandas as pd
 import datasets
+import pandas as pd
 import os
 
 _CITATION = """\
@@ -24,19 +23,16 @@ Paired heavy and light chain antibody sequences for multiple species.
 _HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"
 
 _LICENSE = "cc-by-4.0"
-# _BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/processed/"
 _BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/parquet/"
 
 _URLS = {
-
+    "human": _BASE_URL + "human.tar.gz",
     "rat_SD": _BASE_URL + "rat_SD.tar.gz",
     "mouse_BALB_c": _BASE_URL + "mouse_BALB_c.tar.gz",
-    "mouse_C57BL_6": _BASE_URL + "mouse_C57BL_6.tar.gz",
+    "mouse_C57BL_6": _BASE_URL + "mouse_C57BL_6.tar.gz",
 }
 _FEATURES = datasets.Features(
     {
-        # "pair_id": datasets.Value("string"),
-        "data_unit": datasets.Value("string"),
         "sequence_alignment_aa_heavy": datasets.Value("string"),
         "cdr1_aa_heavy": datasets.Value("string"),
         "cdr2_aa_heavy": datasets.Value("string"),
@@ -45,18 +41,6 @@ _FEATURES = datasets.Features(
         "cdr1_aa_light": datasets.Value("string"),
         "cdr2_aa_light": datasets.Value("string"),
         "cdr3_aa_light": datasets.Value("string"),
-        # "cdr1_aa_heavy_start": datasets.Value("int16"),
-        # "cdr1_aa_heavy_end": datasets.Value("int16"),
-        # "cdr1_aa_light_start": datasets.Value("int16"),
-        # "cdr1_aa_light_end": datasets.Value("int16"),
-        # "cdr2_aa_heavy_start": datasets.Value("int16"),
-        # "cdr2_aa_heavy_end": datasets.Value("int16"),
-        # "cdr2_aa_light_start": datasets.Value("int16"),
-        # "cdr2_aa_light_end": datasets.Value("int16"),
-        # "cdr3_aa_heavy_start": datasets.Value("int16"),
-        # "cdr3_aa_heavy_end": datasets.Value("int16"),
-        # "cdr3_aa_light_start": datasets.Value("int16"),
-        # "cdr3_aa_light_end": datasets.Value("int16"),
     }
 )
 
@@ -66,7 +50,7 @@ class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.2.0")
    BUILDER_CONFIGS = [
-
+        datasets.BuilderConfig(name="human", version=VERSION, description="Human"),
         datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
         datasets.BuilderConfig(
             name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
@@ -92,13 +76,11 @@ class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    # "filepath": os.path.join(data_dir, "train.csv"),
                     "filepath": os.path.join(data_dir),
                     "split": "train",
                 },
             ),
         ]
-
 
     def _generate_examples(self, filepath, split):
         table = pd.read_parquet(filepath)
@@ -106,8 +88,6 @@ class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
             if key == 0:
                 continue
             yield key, {
-                # "pair_id": row[0],
-                # "data_unit": row[0],
                 "sequence_alignment_aa_heavy": row[1],
                 "cdr1_aa_heavy": row[2],
                 "cdr2_aa_heavy": row[3],
@@ -118,34 +98,3 @@ class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
                 "cdr3_aa_light": row[8],
             }
 
-
-# def _generate_examples(self, filepath, split):
-#     with open(filepath, newline="") as f:
-#         reader = csv.reader(f, delimiter=",")
-#         for key, row in enumerate(reader):
-#             if key == 0:
-#                 continue
-#             yield key, {
-#                 "pair_id": row[0],
-#                 "sequence_alignment_aa_heavy": row[1],
-#                 "cdr1_aa_heavy": row[2],
-#                 "cdr2_aa_heavy": row[3],
-#                 "cdr3_aa_heavy": row[4],
-#                 "sequence_alignment_aa_light": row[5],
-#                 "cdr1_aa_light": row[6],
-#                 "cdr2_aa_light": row[7],
-#                 "cdr3_aa_light": row[8],
-#                 # "cdr1_aa_heavy_start": row[9],
-#                 # "cdr1_aa_heavy_end": row[10],
-#                 # "cdr1_aa_light_start": row[11],
-#                 # "cdr1_aa_light_end": row[12],
-#                 # "cdr2_aa_heavy_start": row[13],
-#                 # "cdr2_aa_heavy_end": row[14],
-#                 # "cdr2_aa_light_start": row[15],
-#                 # "cdr2_aa_light_end": row[16],
-#                 # "cdr3_aa_heavy_start": row[17],
-#                 # "cdr3_aa_heavy_end": row[18],
-#                 # "cdr3_aa_light_start": row[19],
-#                 # "cdr3_aa_light_end": row[20],
-#             }
-
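Two things stand out in the new parquet path. First, `_generate_examples` keeps `if key == 0: continue`, a holdover from the commented-out CSV reader; a DataFrame produced by `pd.read_parquet` has no header row, so this now likely drops the first real record. Second, for context, a builder script like this is consumed through `datasets.load_dataset`. A minimal sketch, assuming the script sits in the working directory under its repo filename (the local path is an assumption, not part of this commit):

import datasets

# Build the newly added "human" config: the loader downloads
# _BASE_URL + "human.tar.gz", unpacks the parquet partitions, and
# yields a single "train" split with the string features in _FEATURES.
ds = datasets.load_dataset(
    "./oas-paired-sequence-data.py", name="human", split="train"
)

print(ds.features)             # paired heavy/light alignments plus CDR1-3 per chain
print(ds[0]["cdr3_aa_heavy"])  # heavy-chain CDR3 of the first example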
src/oas-data-cleaning.py CHANGED

@@ -1,18 +1,15 @@
+import boto3
+import io
 import os
 import pandas as pd
 import re
-
-# from zipfile import ZipFile
-import tarfile
-import boto3
-import io
 import shutil
+import tarfile
 
 data_dir = os.getcwd()
 output_path = os.getcwd()
 
-
-species_list = ["human"]
+species_list = ["rat_SD", "mouse_BALB_c", "mouse_C57BL_6", "human"]
 
 S3_BUCKET = "aws-hcls-ml"
 S3_SRC_PREFIX = "oas-paired-sequence-data/raw"
@@ -50,40 +47,23 @@ for species in species_list:
             ]
         ]
         run_data = run_data.dropna()
-        # run_data = run_data.apply(calc_cdr_coordinates, axis=1)
-        # run_data.insert(
-        #     0, "pair_id", run_id + "_" + run_data.reset_index().index.map(str)
-        # )
         run_data.insert(0, "data_unit", run_id)
         list_of_df.append(run_data)
     species_df = pd.concat(list_of_df, ignore_index=True)
     print(f"{species} output summary:")
     print(species_df.head())
     print(species_df.shape)
-    # output_file_name = os.path.join(output_path, "train.csv")
-    # output_file_name = os.path.join(output_path, "train.parquet")
-    # print(f"Creating {output_file_name}")
     parquet_dir = os.path.join(output_path, species)
     os.makedirs(parquet_dir, exist_ok=True)
     species_df.to_parquet(parquet_dir, partition_cols=["data_unit"])
-    # species_df.to_csv(output_file_name, index=False, compression="zip")
-    # species_df.to_csv(output_file_name, index=False)
-    # species_df.to_csv(output_file_name, index=False)
-    # zip_name = species + ".zip"
     zip_name = species + ".tar.gz"
     print(f"Creating {zip_name}")
-    # with ZipFile(zip_name, "w") as myzip:
-    #     myzip.write("train.csv")
-
     with tarfile.open(zip_name, "w:gz") as tf:
-        # tf.add("train.csv")
         tf.add(parquet_dir)
     print(
         f"Uploading {zip_name} to {os.path.join('s3://', S3_BUCKET, S3_DEST_PREFIX)}"
     )
     s3.upload_file(zip_name, S3_BUCKET, os.path.join(S3_DEST_PREFIX, zip_name))
-    # print(f"Removing {output_file_name}")
-    # os.remove(output_file_name)
     print(f"Removing {parquet_dir}")
     shutil.rmtree(parquet_dir)
     print(f"Removing {zip_name}")
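The cleaning script now writes each species as a hive-partitioned parquet directory (one partition per `data_unit`), tars it, and uploads the archive to S3. A minimal sketch of reading one back with pandas/pyarrow, assuming the directory written by `species_df.to_parquet(parquet_dir, partition_cols=["data_unit"])` is still on disk as "./human" (the script deletes it after upload, so this is an assumption for illustration):

import pandas as pd

# pyarrow reassembles the partition directories into a single frame and
# restores the "data_unit" column from the directory names
# (human/data_unit=<run_id>/<file>.parquet).
df = pd.read_parquet("human")

print(df["data_unit"].nunique(), "data units")
print(df.shape[0], "paired sequences")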