# pha_clustered_protein_complexes / cluster_landscapes_v3.py
import pandas as pd
import numpy as np
from transformers import EsmModel, AutoTokenizer
import torch
from scipy.spatial.distance import pdist, squareform
from gudhi import RipsComplex
from gudhi.representations.vector_methods import Landscape
from sklearn.cluster import DBSCAN
from tqdm import tqdm
# Define a helper function for hidden states
def get_hidden_states(sequence, tokenizer, model, layer):
    model.config.output_hidden_states = True
    encoded_input = tokenizer([sequence], return_tensors='pt', padding=True, truncation=True, max_length=1024)
    with torch.no_grad():
        model_output = model(**encoded_input)
    hidden_states = model_output.hidden_states
    specific_hidden_states = hidden_states[layer][0]
    return specific_hidden_states.numpy()
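# Note: the ESM-2 tokenizer adds CLS and EOS special tokens, so a length-L
# sequence yields an (L + 2) x 1280 point cloud for this 650M model (up to
# the 1024-token truncation limit). A hedged sketch, if one wanted
# per-residue embeddings only, would slice the special tokens off:
#   return specific_hidden_states[1:-1].numpy()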
# Define a helper function for Euclidean distance matrix
def compute_euclidean_distance_matrix(hidden_states):
    euclidean_distances = pdist(hidden_states, metric='euclidean')
    euclidean_distance_matrix = squareform(euclidean_distances)
    return euclidean_distance_matrix
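# pdist returns the condensed upper-triangular distances and squareform
# expands them, so N token embeddings give a symmetric N x N matrix with a
# zero diagonal; this is exactly the input format RipsComplex expects below.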
# Define a helper function for persistent homology (finite H0 pairs only)
def compute_persistent_homology(distance_matrix, max_dimension=0):
    max_edge_length = np.max(distance_matrix)
    rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=max_edge_length)
    st = rips_complex.create_simplex_tree(max_dimension=max_dimension)
    persistence = st.persistence()
    # Keep only dimension-0 pairs and filter out infinite death times
    persistence_pairs = np.array([[p[1][0], p[1][1]] for p in persistence if p[0] == 0 and p[1][1] < np.inf])
    return st, persistence_pairs
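# In a Rips filtration every point is born at scale 0, so each finite H0 pair
# is effectively (0, d): the scale d at which two clusters of token
# embeddings merge in the point cloud.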
# Define a second helper that returns the full diagram, including infinite bars
def compute_persistent_homology2(distance_matrix, max_dimension=0):
    max_edge_length = np.max(distance_matrix)
    rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=max_edge_length)
    st = rips_complex.create_simplex_tree(max_dimension=max_dimension)
    return st, st.persistence()
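# Unlike compute_persistent_homology above, this variant returns gudhi's raw
# diagram format [(dim, (birth, death)), ...] with infinite bars included,
# which is what calculate_epsilon below consumes.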
# Define a helper function for Landscape transformations with tqdm
def compute_landscapes(persistence_diagrams, num_landscapes=5, resolution=10000):
    landscape_transformer = Landscape(num_landscapes=num_landscapes, resolution=resolution)
    landscapes = []
    for diagram in tqdm(persistence_diagrams, desc="Computing Landscapes"):
        if len(diagram) > 0:  # Skip empty diagrams
            landscape = landscape_transformer.fit_transform([diagram])[0]
            landscapes.append(landscape)
    return landscapes
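# Design note: fit_transform is called one diagram at a time, so each
# landscape is sampled over its own birth/death range; the L2 comparisons
# below implicitly treat these grids as comparable. Fitting a single
# Landscape transformer on all diagrams at once would enforce a shared
# sample range. Also, skipping empty diagrams would misalign `landscapes`
# with the DataFrame rows; H0 diagrams of multi-token sequences are never
# empty, so that branch should not trigger here.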
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
model = EsmModel.from_pretrained("facebook/esm2_t33_650M_UR50D")
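# This runs the 650M-parameter model on CPU. A minimal sketch, assuming a
# CUDA GPU is available, would move the model over:
#   model = model.to("cuda")
# (get_hidden_states would then also need to move encoded_input to the same
# device and call .cpu() before .numpy().)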
# Define layer to be used
layer = model.config.num_hidden_layers - 1
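# hidden_states has num_hidden_layers + 1 entries (the embedding output comes
# first), so this index selects the next-to-last encoder layer; an index of
# -1 would select the final layer's output.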
# Load the TSV file
file_path = 'clustering_and_evoprotgrad/filtered_protein_interaction_pairs.tsv'
protein_pairs_df = pd.read_csv(file_path, sep='\t')
# Only process the first 10,000 protein pairs
protein_pairs_df = protein_pairs_df.head(10000)
# Concatenate each pair of interacting sequences into one sequence
concatenated_sequences = protein_pairs_df['Protein1'] + protein_pairs_df['Protein2']
# Initialize list to store persistent diagrams
persistent_diagrams = []
# Loop over concatenated sequences to compute their persistent diagrams
for sequence in tqdm(concatenated_sequences, desc="Computing Persistence Diagrams"):
    hidden_states_matrix = get_hidden_states(sequence, tokenizer, model, layer)
    distance_matrix = compute_euclidean_distance_matrix(hidden_states_matrix)
    _, persistence_diagram = compute_persistent_homology(distance_matrix)
    persistent_diagrams.append(persistence_diagram)
# Compute landscapes
landscapes = compute_landscapes(persistent_diagrams)
# Compute the L2 distances between landscapes
with tqdm(total=len(landscapes)*(len(landscapes)-1)//2, desc="Computing Pairwise L2 Distances") as pbar:
    l2_distances = np.zeros((len(landscapes), len(landscapes)))
    for i in range(len(landscapes)):
        for j in range(i + 1, len(landscapes)):
            l2_distances[i, j] = l2_distances[j, i] = np.linalg.norm(landscapes[i] - landscapes[j])
            pbar.update(1)
# Compute the second-level persistent homology using the L2 distance matrix
with tqdm(total=1, desc="Computing Second-Level Persistent Homology") as pbar:
    st_2, persistence_2 = compute_persistent_homology2(l2_distances)
    pbar.update(1)
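# The second-level H0 diagram describes how the protein-pair landscapes merge
# into clusters as the L2 distance scale grows; its lifetimes drive the
# epsilon heuristic below.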
# Function to calculate the epsilon for DBSCAN
def calculate_epsilon(persistence_diagrams, threshold_percentage, max_eps=np.inf):
    lifetimes = [p[1][1] - p[1][0] for p in persistence_diagrams if p[0] == 0]
    lifetimes.sort()
    threshold_index = int(threshold_percentage * len(lifetimes))
    epsilon = lifetimes[threshold_index]
    # Cap epsilon at max_eps, which also guards against selecting the
    # infinite lifetime of the essential H0 bar in the full diagram
    epsilon = min(epsilon, max_eps)
    return epsilon
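# Worked example (hypothetical lifetimes): for sorted lifetimes
# [1.0, 2.0, 4.0, 8.0] and threshold_percentage = 0.35,
# threshold_index = int(0.35 * 4) = 1 and epsilon = 2.0, before capping.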
# Calculate epsilon with a maximum threshold
threshold_percentage = 0.35  # i.e., the 35th percentile of sorted lifetimes
max_epsilon = 5000.0 # Example maximum threshold
epsilon = calculate_epsilon(persistence_2, threshold_percentage, max_eps=max_epsilon)
# Perform DBSCAN clustering
with tqdm(total=1, desc="Performing DBSCAN Clustering") as pbar:
    dbscan = DBSCAN(metric="precomputed", eps=epsilon, min_samples=1)
    dbscan.fit(l2_distances)  # Use the landscape L2 distance matrix
    labels = dbscan.labels_
    pbar.update(1)
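# With min_samples=1 every point is a core point, so DBSCAN assigns no noise
# labels (-1); `labels` has exactly one cluster id per landscape.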
# Add the cluster labels to the DataFrame
protein_pairs_df['Cluster'] = labels
# Save the DataFrame with cluster information
output_file_path = 'clustering_and_evoprotgrad/clustered_protein_pair_landscapes_l2_dist_100K.tsv'
protein_pairs_df.to_csv(output_file_path, sep='\t', index=False)
print(f"Clustered data saved to: {output_file_path}")