import os

import pandas as pd
from datasets import load_dataset

data_dir = os.path.join(os.getcwd(), "data")
species_list = ["human", "rat_SD", "mouse_BALB_c", "mouse_C57BL_6"]

for species in species_list:
    # Each species has a plain-text file listing OAS paired-sequence CSV URLs, one per line.
    species_url_file = os.path.join(os.getcwd(), species + "_oas_paired.txt")
    with open(species_url_file, "r") as f:
        for line in f:
            csv_file = line.strip()  # drop the trailing newline so the URL/path is usable
            if not csv_file:
                continue
            print(csv_file)
            filename = os.path.basename(csv_file)
            filename = os.path.splitext(filename)[0]
            filename = os.path.splitext(filename)[0]  # Not a typo, need to pull off the .gz AND the .csv
            df = pd.read_csv(
                csv_file,
                header=1,  # row 0 holds OAS run metadata; the real column header is row 1
                compression="gzip",
                on_bad_lines="warn",
            )
            # Keep only the paired heavy/light chain columns of interest.
            df = df[
                [
                    "sequence_id_heavy",
                    "sequence_heavy",
                    "locus_heavy",
                    "stop_codon_heavy",
                    "productive_heavy",
                    "rev_comp_heavy",
                    "sequence_alignment_aa_heavy",
                    "fwr1_aa_heavy",
                    "cdr1_aa_heavy",
                    "fwr2_aa_heavy",
                    "cdr2_aa_heavy",
                    "fwr3_aa_heavy",
                    "cdr3_aa_heavy",
                    "junction_aa_heavy",
                    "sequence_id_light",
                    "sequence_light",
                    "locus_light",
                    "stop_codon_light",
                    "productive_light",
                    "rev_comp_light",
                    "sequence_alignment_aa_light",
                    "fwr1_aa_light",
                    "cdr1_aa_light",
                    "fwr2_aa_light",
                    "cdr2_aa_light",
                    "fwr3_aa_light",
                    "cdr3_aa_light",
                    "junction_aa_light",
                ]
            ]
            df.insert(0, "run", species)  # tag every row with the species it came from
            output_path = os.path.join(data_dir, species)
            os.makedirs(output_path, exist_ok=True)
            df.to_parquet(
                os.path.join(output_path, filename + ".parquet"), compression="gzip"
            )

# Sanity check: load the last species' parquet directory with Hugging Face datasets
# and report the row counts per split.
x = load_dataset(output_path)
print(x.num_rows)