EC2 Default User committed on
Commit
f7d009d
1 Parent(s): 4e30a61

Add load script

Browse files
human.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:385678cd991b8639196d97b51f1f2396d32ced223bdacd60ffb806cf3b568701
3
- size 92818695
 
 
 
 
mouse_BALB_c.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:aadc6d7524d60d80109143772bc307bae06b8aabace45c87bdd2ca3aa7dd777f
3
- size 586744
 
 
 
 
mouse_C57BL_6.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f46d6e00e6bde03b91675d2ba03dcb3c6f67c944ae8dd9606a6c373a07c61ae3
3
- size 174681
 
 
 
 
oas-paired-sequence-data.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """Paired sequences from the Observed Antibody Space database"""
16
+
17
+
18
+ import csv
19
+ import json
20
+ import os
21
+ import pandas as pd
22
+
23
+ import datasets
24
+
25
_CITATION = """\
@article{Olsen_Boyles_Deane_2022,
title={Observed Antibody Space: A diverse database of cleaned, annotated, and translated unpaired and paired antibody sequences},
volume={31}, rights={© 2021 The Authors. Protein Science published by Wiley Periodicals LLC on behalf of The Protein Society.},
ISSN={1469-896X}, DOI={10.1002/pro.4205},
number={1}, journal={Protein Science}, author={Olsen, Tobias H. and Boyles, Fergus and Deane, Charlotte M.},
year={2022}, pages={141–146}, language={en} }

"""
_DESCRIPTION = """\
Paired heavy and light chain antibody sequences for multiple species.
"""

_HOMEPAGE = "https://opig.stats.ox.ac.uk/webapps/oas/"

_LICENSE = "cc-by-4.0"

# One parquet file per species/strain, addressed relative to the dataset
# repository root. The keys double as the builder-config names.
_URLS = {
    species: f"{species}.parquet"
    for species in ("human", "rat_SD", "mouse_BALB_c", "mouse_C57BL_6")
}

# Every column in the dataset is a plain string: a pair identifier plus the
# amino-acid sequence alignment and CDR1-3 regions for each chain.
_FEATURE_COLUMNS = (
    "pair_id",
    "sequence_alignment_aa_heavy",
    "cdr1_aa_heavy",
    "cdr2_aa_heavy",
    "cdr3_aa_heavy",
    "sequence_alignment_aa_light",
    "cdr1_aa_light",
    "cdr2_aa_light",
    "cdr3_aa_light",
)
_FEATURES = datasets.Features(
    {column: datasets.Value("string") for column in _FEATURE_COLUMNS}
)
62
+
63
+
64
class OasPairedSequenceData(datasets.GeneratorBasedBuilder):
    """OAS paired sequence data.

    Paired heavy/light chain antibody sequences from the Observed Antibody
    Space database, one builder config per species/strain. Each config loads
    a single parquet file and exposes it as a ``train`` split.
    """

    VERSION = datasets.Version("1.1.0")

    # Select a configuration with e.g.
    #   datasets.load_dataset("oas-paired-sequence-data", "human")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="human", version=VERSION, description="Human"),
        datasets.BuilderConfig(name="rat_SD", version=VERSION, description="rat_SD"),
        datasets.BuilderConfig(
            name="mouse_BALB_c", version=VERSION, description="mouse_BALB_c"
        ),
        datasets.BuilderConfig(
            name="mouse_C57BL_6", version=VERSION, description="mouse_C57BL_6"
        ),
    ]

    def _info(self):
        """Return dataset metadata: features schema, citation, homepage, license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the parquet file for the selected config; one TRAIN split.

        Args:
            dl_manager: datasets.download.DownloadManager used to resolve the
                relative parquet path in ``_URLS`` to a local cached file.
        """
        urls = _URLS[self.config.name]
        data = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    "filepath": data,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield (key, example) tuples from one parquet file.

        Fix: the data files in ``_URLS`` are parquet, not CSV, so reading them
        with ``csv.reader`` on a text handle cannot work; use pandas instead
        (already imported at module level).

        Args:
            filepath: local path to the downloaded parquet file.
            split: split name (always "train"); unused but part of gen_kwargs.
        """
        table = pd.read_parquet(filepath)
        for key, row in enumerate(table.itertuples(index=False)):
            # Positional access mirrors the original implementation; assumes
            # the parquet column order matches _FEATURES — TODO confirm
            # against the data files.
            yield key, {
                "pair_id": row[0],
                "sequence_alignment_aa_heavy": row[1],
                "cdr1_aa_heavy": row[2],
                "cdr2_aa_heavy": row[3],
                "cdr3_aa_heavy": row[4],
                "sequence_alignment_aa_light": row[5],
                "cdr1_aa_light": row[6],
                "cdr2_aa_light": row[7],
                "cdr3_aa_light": row[8],
            }
rat_SD.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5b60b14ea7fe3c0216f8640b6cacd1e63543e15663e308d6998a8c6a0eac87e8
3
- size 2053567