add deepcoy DEKOIS and DUDE predictions
- combine_predictions.py +10 -0
- data/deepcoy_dekois_predict.parquet +3 -0
- data/deepcoy_dude_predict.parquet +3 -0
- deepcoy_combine.py +49 -0
combine_predictions.py
ADDED
@@ -0,0 +1,10 @@
+import dask.dataframe as dd
+import sys
+
+if __name__ == '__main__':
+    import glob
+
+    filenames = glob.glob(sys.argv[2])
+
+    ddf = dd.read_parquet(filenames)
+    ddf.compute().to_parquet(sys.argv[1])
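
Usage note (not part of the commit; the paths below are placeholders): the script takes the output parquet path as sys.argv[1] and a glob of input parquet files as sys.argv[2], so an invocation would look like python combine_predictions.py combined.parquet 'predictions/*.parquet', with the glob quoted so the shell passes the pattern through unexpanded. Because the Dask dataframe is compute()d before writing, the output is a single parquet file rather than a partitioned directory, so it can be read back directly with pandas, as in this minimal sketch:

    import pandas as pd

    # Hypothetical output path from the invocation above.
    df = pd.read_parquet('combined.parquet')
    print(len(df), list(df.columns))  # schema comes from the upstream prediction files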
data/deepcoy_dekois_predict.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c50a7887a034c148c07ed24a6eba35e04e1319fd9e62a7364fa68ace63fedcab
+size 3597858
data/deepcoy_dude_predict.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81412340663965b4495b9d13961ca9b86bd65b02b01ec11d218faafbd814e257
+size 38819110
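
The two files under data/ are checked in as Git LFS pointers, so the repository stores only the hash and size shown above (about 3.6 MB for the DEKOIS predictions and 38.8 MB for the DUDE ones); the parquet payloads arrive with git lfs pull. A minimal sketch for inspecting them once the objects are fetched, with no assumptions about their column schema:

    import pandas as pd

    # After `git lfs pull`, the pointer files are replaced by real parquet data.
    for path in ('data/deepcoy_dekois_predict.parquet',
                 'data/deepcoy_dude_predict.parquet'):
        df = pd.read_parquet(path)
        print(path, df.shape, list(df.columns))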
deepcoy_combine.py
ADDED
@@ -0,0 +1,49 @@
+import dask.dataframe as dd
+import pandas as pd
+import sys
+import os
+import numpy as np
+
+from Bio.PDB import PDBList
+from Bio import SeqIO
+
+import warnings
+
+def get_sequence(pdb_id):
+    try:
+        pdbfile = PDBList().retrieve_pdb_file(pdb_id.upper(),file_format='pdb',pdir='/tmp')
+        seq = str(next(SeqIO.parse(pdbfile, "pdb-seqres")).seq)
+        os.unlink(pdbfile)
+
+        return seq
+    except Exception as e:
+        print(e)
+        pass
+
+if __name__ == '__main__':
+    import glob
+
+    filenames = glob.glob(sys.argv[3])
+
+    seqs = []
+    smiles = []
+    active = []
+
+    targets = pd.read_csv(sys.argv[1],sep=' ',keep_default_na=False)
+    for fn in filenames:
+        df = pd.read_csv(fn,header=None,sep=' ')
+        actives = df[0].unique()
+        decoys = df[1].unique()
+        smiles += actives.tolist()+decoys.tolist()
+        active += [True]*len(actives) + [False]*len(decoys)
+        split = os.path.basename(fn).split('-')
+        target = split[2].upper()
+        if len(split) > 5:
+            target += '-'+split[3].upper()
+        print(target)
+        seq = get_sequence(targets[targets.name.str.upper()==target].pdb.values[0])
+        seqs += [seq]*(len(actives)+len(decoys))
+
+    ddf = dd.from_pandas(pd.DataFrame({'seq': seqs, 'smiles': smiles, 'active': active}),npartitions=1)
+    ddf = ddf.repartition(partition_size='1M')
+    ddf.to_parquet(sys.argv[2])
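
Usage note (placeholder paths, not part of the commit): deepcoy_combine.py expects sys.argv[1] to be a space-separated target table with at least name and pdb columns, sys.argv[2] to be the output parquet path, and sys.argv[3] to be a quoted glob over DeepCoy output files, whose first whitespace-separated column is read as active SMILES and second as decoy SMILES, e.g. python deepcoy_combine.py targets.txt data/deepcoy_dekois_predict.parquet 'deepcoy/*.txt'. The target name is recovered from each filename by splitting the basename on '-'; the filenames in the sketch below are hypothetical, chosen only to illustrate which pieces the script keeps:

    import os

    # Hypothetical DeepCoy output filenames; the real naming scheme may differ.
    for fn in ('deepcoy-dekois-ace-final.txt',
               'deepcoy-dekois-hiv1-pr-decoys-final.txt'):
        split = os.path.basename(fn).split('-')
        target = split[2].upper()
        if len(split) > 5:
            # Targets with a hyphen in their name keep the next token too.
            target += '-' + split[3].upper()
        print(fn, '->', target)  # -> ACE, then HIV1-PR

get_sequence then downloads the matching PDB entry with Biopython's PDBList, reads the first SEQRES record via SeqIO's pdb-seqres parser, deletes the temporary file, and returns the sequence string; on any failure it prints the exception and returns None, so the affected rows carry a null seq value in the output table.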