File size: 2,335 Bytes
373eddf
 
 
 
 
 
50f055e
373eddf
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50f055e
373eddf
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import pandas as pd
import os
from datasets import load_dataset

# Root directory for per-species parquet output (one subdirectory per species).
data_dir = os.path.join(os.getcwd(), "data")

# OAS species tags; each has a matching "<species>_oas_paired.txt" URL manifest
# in the current working directory.
species_list = ["human", "rat_SD", "mouse_BALB_c", "mouse_C57BL_6"]

# Per-chain AIRR fields to keep; the full column set is every field suffixed
# with "_heavy" then "_light", matching the OAS paired-CSV schema.
_CHAIN_FIELDS = [
    "sequence_id",
    "sequence",
    "locus",
    "stop_codon",
    "productive",
    "rev_comp",
    "sequence_alignment_aa",
    "fwr1_aa",
    "cdr1_aa",
    "fwr2_aa",
    "cdr2_aa",
    "fwr3_aa",
    "cdr3_aa",
    "junction_aa",
]
_OAS_COLUMNS = [f"{field}_{chain}" for chain in ("heavy", "light") for field in _CHAIN_FIELDS]

for species in species_list:
    # One-URL-per-line manifest, e.g. "human_oas_paired.txt", in the CWD.
    species_url_file = os.path.join(os.getcwd(), species + "_oas_paired.txt")

    # Bind the output directory at species scope (the original bound it inside
    # the inner loop, so an empty manifest made load_dataset raise NameError).
    output_path = os.path.join(data_dir, species)
    os.makedirs(output_path, exist_ok=True)  # exist_ok makes a pre-existence check redundant

    with open(species_url_file, "r") as f:
        for line in f:
            # readlines()/iteration keeps the trailing newline; strip it so the
            # URL handed to read_csv and the derived filename are clean.
            csv_url = line.strip()
            if not csv_url:
                continue  # tolerate blank lines in the manifest
            print(csv_url)

            # "<name>.csv.gz" -> "<name>": peel off both extensions.
            filename = os.path.basename(csv_url)
            filename = os.path.splitext(os.path.splitext(filename)[0])[0]

            df = pd.read_csv(
                csv_url,
                header=1,  # OAS CSVs carry a one-line metadata record before the real header
                compression="gzip",
                on_bad_lines="warn",
            )
            df = df[_OAS_COLUMNS]
            df.insert(0, "run", species)  # tag every row with its source species
            df.to_parquet(
                os.path.join(output_path, filename + ".parquet"), compression="gzip"
            )

    # Sanity check: reload everything written for this species and report rows.
    x = load_dataset(output_path)
    print(x.num_rows)