import pandas as pd
import numpy as np
import torch
from transformers import EsmModel, AutoTokenizer
from scipy.spatial.distance import pdist, squareform
from gudhi import RipsComplex
from gudhi.representations.vector_methods import Landscape
from sklearn.cluster import DBSCAN
from tqdm import tqdm
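
# Pipeline: embed concatenated protein pairs with ESM-2, compute H0 persistent
# homology of each pair's residue-embedding distance matrix, vectorize the
# diagrams as persistence landscapes, then cluster the pairs with DBSCAN on
# the pairwise L2 distances between landscapes, using a second-level
# persistence computation to choose the eps scale.
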
def get_hidden_states(sequence, tokenizer, model, layer):
    """Per-residue embeddings from the chosen hidden layer of the model."""
    encoded_input = tokenizer([sequence], return_tensors='pt', padding=True,
                              truncation=True, max_length=1024)
    with torch.no_grad():
        model_output = model(**encoded_input, output_hidden_states=True)
    # hidden_states is a tuple of (num_hidden_layers + 1) tensors of shape
    # (batch, seq_len, hidden_dim); index 0 is the embedding-layer output.
    specific_hidden_states = model_output.hidden_states[layer][0]
    return specific_hidden_states.numpy()

def compute_euclidean_distance_matrix(hidden_states):
    """Square matrix of pairwise Euclidean distances between embeddings."""
    euclidean_distances = pdist(hidden_states, metric='euclidean')
    return squareform(euclidean_distances)

def compute_persistent_homology(distance_matrix, max_dimension=1):
    """Finite H0 persistence pairs of the Vietoris-Rips filtration."""
    # The simplex tree must include edges (dimension >= 1): with
    # max_dimension=0 no H0 class ever dies and every bar is infinite,
    # so the finite-bar filter below would return an empty diagram.
    max_edge_length = np.max(distance_matrix)
    rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=max_edge_length)
    st = rips_complex.create_simplex_tree(max_dimension=max_dimension)
    persistence = st.persistence()
    # Keep only the finite H0 intervals as (birth, death) rows.
    persistence_pairs = np.array([[birth, death] for dim, (birth, death) in persistence
                                  if dim == 0 and death < np.inf])
    return st, persistence_pairs

def compute_persistent_homology2(distance_matrix, max_dimension=1):
    """Raw persistence (including the infinite H0 bar) of the Rips filtration."""
    max_edge_length = np.max(distance_matrix)
    rips_complex = RipsComplex(distance_matrix=distance_matrix, max_edge_length=max_edge_length)
    st = rips_complex.create_simplex_tree(max_dimension=max_dimension)
    # Compute persistence once and reuse the result.
    persistence = st.persistence()
    return st, persistence

def compute_landscapes(persistence_diagrams, num_landscapes=5, resolution=10000):
    """Vectorize persistence diagrams as persistence landscapes."""
    landscape_transformer = Landscape(num_landscapes=num_landscapes, resolution=resolution)
    # Fit once on all non-empty diagrams so every landscape is sampled on a
    # common grid; per-diagram fitting would make the L2 distances between
    # landscapes incomparable.
    landscape_transformer.fit([d for d in persistence_diagrams if len(d) > 0])
    landscapes = []
    for diagram in tqdm(persistence_diagrams, desc="Computing Landscapes"):
        if len(diagram) > 0:
            landscapes.append(landscape_transformer.transform([diagram])[0])
        else:
            # Zero landscape keeps the output aligned with protein_pairs_df rows.
            landscapes.append(np.zeros(num_landscapes * resolution))
    return landscapes

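# Load the 650M-parameter ESM-2 protein language model and its tokenizer.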
tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t33_650M_UR50D")
model = EsmModel.from_pretrained("facebook/esm2_t33_650M_UR50D")
model.eval()  # disable dropout for deterministic embeddings

# hidden_states has num_hidden_layers + 1 entries (index 0 is the embedding
# output), so this index selects the second-to-last transformer layer.
layer = model.config.num_hidden_layers - 1

file_path = 'clustering_and_evoprotgrad/filtered_protein_interaction_pairs.tsv'
protein_pairs_df = pd.read_csv(file_path, sep='\t')

# Limit to the first 10,000 pairs to keep the run tractable.
protein_pairs_df = protein_pairs_df.head(10000)

# Each sample is the concatenation of the two interacting sequences.
concatenated_sequences = protein_pairs_df['Protein1'] + protein_pairs_df['Protein2']

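# First-level persistent homology: one H0 diagram per protein pair, built
# from the distances between its residue embeddings.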
persistence_diagrams = []
for sequence in tqdm(concatenated_sequences, desc="Computing Persistence Diagrams"):
    hidden_states_matrix = get_hidden_states(sequence, tokenizer, model, layer)
    distance_matrix = compute_euclidean_distance_matrix(hidden_states_matrix)
    _, persistence_diagram = compute_persistent_homology(distance_matrix)
    persistence_diagrams.append(persistence_diagram)

landscapes = compute_landscapes(persistence_diagrams)

with tqdm(total=len(landscapes) * (len(landscapes) - 1) // 2, desc="Computing Pairwise L2 Distances") as pbar:
    # Symmetric matrix of L2 distances between landscape vectors.
    l2_distances = np.zeros((len(landscapes), len(landscapes)))
    for i in range(len(landscapes)):
        for j in range(i + 1, len(landscapes)):
            l2_distances[i, j] = l2_distances[j, i] = np.linalg.norm(landscapes[i] - landscapes[j])
            pbar.update(1)

with tqdm(total=1, desc="Computing Second-Level Persistent Homology") as pbar: |
|
st_2, persistence_2 = compute_persistent_homology2(l2_distances) |
|
pbar.update(1) |
|
|
|
|
|
def calculate_epsilon(persistence_diagrams, threshold_percentage, max_eps=np.inf):
    """DBSCAN eps chosen as a percentile of the finite H0 lifetimes."""
    # Drop the infinite bar (the last component to merge); its lifetime is
    # inf and would corrupt the percentile.
    lifetimes = [death - birth for dim, (birth, death) in persistence_diagrams
                 if dim == 0 and death < np.inf]
    lifetimes.sort()
    # Clamp the index so threshold_percentage = 1.0 stays in bounds.
    threshold_index = min(int(threshold_percentage * len(lifetimes)), len(lifetimes) - 1)
    epsilon = lifetimes[threshold_index]
    return min(epsilon, max_eps)

threshold_percentage = 0.35
max_epsilon = 5000.0
epsilon = calculate_epsilon(persistence_2, threshold_percentage, max_eps=max_epsilon)

with tqdm(total=1, desc="Performing DBSCAN Clustering") as pbar: |
|
dbscan = DBSCAN(metric="precomputed", eps=epsilon, min_samples=1) |
|
dbscan.fit(l2_distances) |
|
labels = dbscan.labels_ |
|
pbar.update(1) |
|
|
|
|
|
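# Optional readout of the clustering before saving; since min_samples=1,
# len(set(labels)) is exactly the number of clusters.
print(f"DBSCAN produced {len(set(labels))} clusters for {len(labels)} protein pairs")
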
protein_pairs_df['Cluster'] = labels

output_file_path = 'clustering_and_evoprotgrad/clustered_protein_pair_landscapes_l2_dist_100K.tsv'
protein_pairs_df.to_csv(output_file_path, sep='\t', index=False)

print(f"Clustered data saved to: {output_file_path}")