EC2 Default User committed
Commit: de6ec9e
Parent: a024f41

Try LFS storage
.gitattributes CHANGED
@@ -54,3 +54,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
  */*.csv filter=lfs diff=lfs merge=lfs -text
+ mouse_BALB_c.tar.gz filter=lfs diff=lfs merge=lfs -text
+ mouse_C57BL_6.tar.gz filter=lfs diff=lfs merge=lfs -text
+ rat_SD.tar.gz filter=lfs diff=lfs merge=lfs -text
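These three lines are what `git lfs track "<file>"` appends to .gitattributes: the new tar.gz archives are routed through LFS (stored in git as pointers, see the ADDED files below) rather than committed as ordinary blobs, which is what "Try LFS storage" refers to.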
README.md CHANGED
@@ -4,6 +4,13 @@ language: en
  task_categories:
  - fill-mask
  license: cc-by-4.0
+ configs:
+ - config_name: rat_SD
+   data_files: "rat_SD.tar.gz"
+ - config_name: mouse_BALB_c
+   data_files: "mouse_BALB_c.tar.gz"
+ - config_name: mouse_C57BL_6
+   data_files: "mouse_C57BL_6.tar.gz"
  ---

  # Dataset Card for OAS Paired Sequence Data
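The new configs section registers each species archive as a named configuration, which lets the Hub serve the data directly from the tar.gz files without a loader script (the old script is deleted further down). A minimal loading sketch; the repo id is a placeholder assumption, since the dataset's namespace is not shown in this diff:

from datasets import load_dataset

# "<namespace>/oas-paired-sequence-data" is an assumed placeholder; use the
# dataset's actual Hub path. "rat_SD" selects one of the configs above.
ds = load_dataset("<namespace>/oas-paired-sequence-data", "rat_SD", split="train")
print(ds[0])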
mouse_BALB_c.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:876fdae8478963175654c9d9ed7a8d97847e002b92c3112ee730eddff24118cf
+ size 114634
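What git stores for each archive is this three-line Git LFS pointer, not the archive itself; the actual bytes live in LFS storage, addressed by the sha256 oid. A small sketch of reading such a pointer, assuming the file on disk still holds the pointer text (i.e. it has not been smudged by git lfs):

def parse_lfs_pointer(path):
    # Each pointer line is "key value"; collect them into a dict, e.g.
    # {"version": "https://...", "oid": "sha256:876f...", "size": "114634"}.
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

print(parse_lfs_pointer("mouse_BALB_c.tar.gz")["size"])  # -> 114634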
mouse_C57BL_6.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d87e773f30d1ee949d46b2042603b2a060223db87be6309ee22aedbbab227290
+ size 130209
oas-data-cleaning.py ADDED
@@ -0,0 +1,84 @@
+ # Download OAS paired-sequence CSV files, keep the paired-chain columns,
+ # and package per-species parquet files into tar.gz archives for LFS.
+ # import boto3
+ # import io
+ import os
+ import pandas as pd
+ import re
+ import shutil
+ import tarfile
+
+ data_dir = os.path.join(os.getcwd(), "data_units")
+ # output_path = os.getcwd()
+
+ # species_list = ["rat_SD", "mouse_BALB_c", "mouse_C57BL_6", "human"]
+ species_list = ["rat_SD", "mouse_BALB_c", "mouse_C57BL_6"]
+
+ # S3_BUCKET = "aws-hcls-ml"
+ # S3_SRC_PREFIX = "oas-paired-sequence-data/raw"
+ # S3_DEST_PREFIX = "oas-paired-sequence-data/parquet"
+ # s3 = boto3.client("s3")
+ # BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/raw/rat_SD/SRR9179275_paired.csv.gz"
+ BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/raw/"
+
+ for species in species_list:
+     print(f"Downloading {species} files")
+     # list_of_df = []
+     species_url_file = os.path.join(data_dir, species + ".txt")
+     with open(species_url_file, "r") as f:
+         i = 0
+         os.makedirs(species, exist_ok=True)
+         for csv_file in f.readlines():
+             csv_file = csv_file.strip()  # drop the trailing newline before building the URL
+             print(csv_file)
+             filename = os.path.basename(csv_file)
+             run_id = str(re.search(r"^(.*)_[Pp]aired", filename)[1])
+             url = os.path.join(BASE_URL, species, csv_file)
+             # s3_key = os.path.join(S3_SRC_PREFIX, species, csv_file)
+             # obj = s3.get_object(Bucket=S3_BUCKET, Key=s3_key)
+             run_data = pd.read_csv(
+                 # io.BytesIO(obj["Body"].read()),
+                 url,
+                 header=1,  # row 0 is a metadata line; row 1 holds the column names
+                 compression="gzip",
+                 on_bad_lines="warn",
+                 low_memory=False,
+             )
+             run_data = run_data[
+                 [
+                     "sequence_alignment_aa_heavy",
+                     "cdr1_aa_heavy",
+                     "cdr2_aa_heavy",
+                     "cdr3_aa_heavy",
+                     "sequence_alignment_aa_light",
+                     "cdr1_aa_light",
+                     "cdr2_aa_light",
+                     "cdr3_aa_light",
+                 ]
+             ]
+             run_data = run_data.dropna()
+             run_data.insert(0, "data_unit", run_id)
+             print(run_data.shape)
+             output_path = os.path.join(species, "train_" + str(i) + ".parquet")
+             run_data.to_parquet(output_path)
+             i += 1  # advance so each run gets its own file instead of overwriting train_0
+             # list_of_df.append(run_data)
+     # species_df = pd.concat(list_of_df, ignore_index=True)
+     # print(f"{species} output summary:")
+     # print(species_df.head())
+     # print(species_df.shape)
+     # os.makedirs(species, exist_ok=True)
+     # species_df.to_parquet(species, partition_cols=["data_unit"])
+     zip_name = species + ".tar.gz"
+     print(f"Creating {zip_name}")
+     with tarfile.open(zip_name, "w:gz") as tf:
+         tf.add(species, arcname="")  # parquet files land at the archive root
+     # print(
+     #     f"Uploading {zip_name} to {os.path.join('s3://', S3_BUCKET, S3_DEST_PREFIX)}"
+     # )
+     # s3.upload_file(zip_name, S3_BUCKET, os.path.join(S3_DEST_PREFIX, zip_name))
+     print(f"Removing {species}")
+     shutil.rmtree(species)
+     # print(f"Removing {zip_name}")
+     # os.remove(zip_name)
+
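A quick sanity check of the script's output, assuming (per tf.add(species, arcname="") above) that the parquet files sit at the root of each archive:

import io
import tarfile

import pandas as pd

# Open one generated archive and read its first parquet member.
with tarfile.open("rat_SD.tar.gz", "r:gz") as tf:
    member = next(m for m in tf.getmembers() if m.name.endswith(".parquet"))
    payload = tf.extractfile(member).read()

df = pd.read_parquet(io.BytesIO(payload))
print(df.shape)  # rows x 9 columns: data_unit plus the eight chain fields
print(df["data_unit"].unique())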
oas-paired-sequence-data.py DELETED
@@ -1,138 +0,0 @@
- #!/usr/bin/env python
- # coding=utf-8
- # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- # SPDX-License-Identifier: MIT-0
- """Paired sequences from the Observed Antibody Space database"""
- import datasets
- import os
- import csv
-
- _CITATION = """\
- @article{Olsen_Boyles_Deane_2022,
- title={Observed Antibody Space: A diverse database of cleaned, annotated, and translated unpaired and paired antibody sequences},
- volume={31}, rights={© 2021 The Authors. Protein Science published by Wiley Periodicals LLC on behalf of The Protein Society.},
- ISSN={1469-896X}, DOI={10.1002/pro.4205},
- number={1}, journal={Protein Science}, author={Olsen, Tobias H. and Boyles, Fergus and Deane, Charlotte M.},
- year={2022}, pages={141–146}, language={en} }
-
- """
- _DESCRIPTION = """\
- Paired heavy and light chain antibody sequences for multiple species.
- """
-
- _HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"
-
- _LICENSE = "cc-by-4.0"
- _BASE_URL = "https://aws-hcls-ml.s3.amazonaws.com/oas-paired-sequence-data/raw/"
-
- # _URLS = {
- #     "human": _BASE_URL + "human.tar.gz",
- #     "rat_SD": _BASE_URL + "rat_SD.tar.gz",
- #     "mouse_BALB_c": _BASE_URL + "mouse_BALB_c.tar.gz",
- #     "mouse_C57BL_6": _BASE_URL + "mouse_C57BL_6.tar.gz",
- # }
- _FEATURES = datasets.Features(
-     {
-         "sequence_alignment_aa_heavy": datasets.Value("string"),
-         "cdr1_aa_heavy": datasets.Value("string"),
-         "cdr2_aa_heavy": datasets.Value("string"),
-         "cdr3_aa_heavy": datasets.Value("string"),
-         "sequence_alignment_aa_light": datasets.Value("string"),
-         "cdr1_aa_light": datasets.Value("string"),
-         "cdr2_aa_light": datasets.Value("string"),
-         "cdr3_aa_light": datasets.Value("string"),
-     }
- )
-
-
- class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
-     """OAS paired sequence data."""
-
-     VERSION = datasets.Version("1.2.0")
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="human", version=VERSION, description="human"),
-         datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
-         datasets.BuilderConfig(
-             name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
-         ),
-         datasets.BuilderConfig(
-             name="mouse_C57BL_6", version=VERSION, description="mouse_C57BL_6"
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=_FEATURES,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     # def _split_generators(self, dl_manager):
-     #     urls = _URLS[self.config.name]
-     #     data_dir = dl_manager.download_and_extract(urls)
-     #     return [
-     #         datasets.SplitGenerator(
-     #             name=datasets.Split.TRAIN,
-     #             gen_kwargs={
-     #                 "filepath": os.path.join(data_dir),
-     #                 "split": "train",
-     #             },
-     #         ),
-     #     ]
-
-     # def _generate_examples(self, filepath, split):
-     #     table = pd.read_parquet(filepath)
-     #     for key, row in enumerate(table.itertuples()):
-     #         if key == 0:
-     #             continue
-     #         yield key, {
-     #             "sequence_alignment_aa_heavy": row[1],
-     #             "cdr1_aa_heavy": row[2],
-     #             "cdr2_aa_heavy": row[3],
-     #             "cdr3_aa_heavy": row[4],
-     #             "sequence_alignment_aa_light": row[5],
-     #             "cdr1_aa_light": row[6],
-     #             "cdr2_aa_light": row[7],
-     #             "cdr3_aa_light": row[8],
-     #         }
-
-     def _split_generators(self, dl_manager):
-         data_unit_file = os.path.join(
-             os.getcwd(), "data_units", self.config.name + ".txt"
-         )
-         with open(data_unit_file, "r") as f:
-             urls = [
-                 os.path.join(_BASE_URL, self.config.name, line.strip()) for line in f
-             ]
-         data_files = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": data_files,
-                     "split": "train",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepaths):
-         for filepath in filepaths:
-             with open(filepath, "r") as f:
-                 reader = csv.reader(f, delimiter=",")
-                 for key, row in enumerate(reader):
-                     if key < 2:
-                         continue
-                     else:
-                         yield key - 2, {
-                             "sequence_alignment_aa_heavy": row[14],
-                             "cdr1_aa_heavy": row[37],
-                             "cdr2_aa_heavy": row[41],
-                             "cdr3_aa_heavy": row[47],
-                             "sequence_alignment_aa_light": row[113],
-                             "cdr1_aa_light": row[136],
-                             "cdr2_aa_light": row[140],
-                             "cdr3_aa_light": row[146],
-                         }
-
rat_SD.tar.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c9af9dc3c63df41e091a9d847dd8aa95fdb0d73d81024516916b78c25ee1722
+ size 176753