haneulpark committed on
Commit
f4f6162
1 Parent(s): a805e6a

Upload MutagenLou Preprocessing.py

Files changed (1)
  1. MutagenLou Preprocessing.py +263 -0
MutagenLou Preprocessing.py ADDED
@@ -0,0 +1,263 @@
## Script to sanitize and split MutagenLou2023 dataset

#1. Import modules

# Install the dependencies first (shell commands, not Python statements):
#   pip install rdkit
#   pip install molvs
import pandas as pd
import numpy as np
import urllib.request
import tqdm
import rdkit
from rdkit import Chem
import molvs

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
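# Illustrative example (not in the original script): the standardizer normalizes
# the structure and the fragment remover strips common counter-ions. The salt
# SMILES below is an assumed demo input; sodium is on MolVS's default fragment list.
_demo = Chem.MolFromSmiles("CC(=O)O.[Na+]")
print(Chem.MolToSmiles(fragment_remover.remove(standardizer.standardize(_demo))))  # expected: CC(=O)O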

#2. Import a dataset

# Download 'ames_data.csv' from the paper:
#   "Chemical rules for optimization of chemical mutagenicity via matched molecular pairs analysis and machine learning methods"
#   Chaofeng Lou, Hongbin Yang, Hua Deng, Mengting Huang, Weihua Li, Guixia Liu, Philip W. Lee & Yun Tang
#   https://github.com/Louchaofeng/Ames-mutagenicity-optimization/blob/main/data/ames_data.csv
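# Optional fetch via urllib.request (imported above but otherwise unused here).
# The raw-file URL is inferred from the repository path above and is an
# assumption; verify it before relying on it.
import os
if not os.path.exists("ames_data.csv"):
    url = ("https://raw.githubusercontent.com/Louchaofeng/"
           "Ames-mutagenicity-optimization/main/data/ames_data.csv")
    urllib.request.urlretrieve(url, "ames_data.csv")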
Lou2023 = pd.read_csv("ames_data.csv")

#3. Resolve SMILES parse error

# RDKit rejects the hypervalent 'O=Br' form in the source data, so rewrite that
# one record using the equivalent charge-separated form.
Lou2023.loc[Lou2023['smiles'] == 'O=Brc1ccc(\\C=C\\C(=O)c2ccccc2)cc1', 'smiles'] = "[O-][Br+]c1ccc(\\C=C\\C(=O)c2ccccc2)cc1"

#4. Sanitize with MolVS and print problems

# Standardize each structure, strip salt/solvent fragments, and write the
# canonical SMILES back.
Lou2023['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in Lou2023['smiles']]

problems = []
for index, row in tqdm.tqdm(Lou2023.iterrows()):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['ID'], result))

# Most problems occur because a record is a salt form and/or is not neutralized
for id, alert in problems:
    print(f"ID: {id}, problem: {alert[0]}")

# Result interpretation
# - "Can't kekulize mol": kekulization would break the molecule apart, so the
#   step could not proceed. It does not mean the molecule is bad, only that
#   this normalization step failed.
# - "Unusual charge on atom 0 number of radical electrons set to zero"
# - "Aborted reionization due to unexpected situation"
# - "... is present": not a salt or fragment error; it reports that a known
#   molecule (e.g. benzene) is present in the structure.
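# Optional summary (not in the original script): tally the distinct validation
# messages so the interpretation above can be checked at a glance.
from collections import Counter
for message, count in Counter(alert[0] for _, alert in problems).most_common():
    print(count, message)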

#5. Select columns and rename the dataset

Lou2023.rename(columns={'X': 'new SMILES'}, inplace=True)
Lou2023[['new SMILES', 'ID', 'endpoint', 'MW']].to_csv('Lou2023.csv', index=False)

#6. Import modules to split the dataset

import sys
from rdkit import DataStructs
from rdkit.Chem import AllChem as Chem  # note: rebinds Chem from rdkit.Chem to AllChem from here on
from rdkit.Chem import PandasTools

#7. Split the dataset into test and train

class MolecularFingerprint:
    def __init__(self, fingerprint):
        self.fingerprint = fingerprint

    def __str__(self):
        return self.fingerprint.__str__()

def compute_fingerprint(molecule):
    try:
        # 1024-bit Morgan fingerprint (radius 2), converted to a numpy array
        fingerprint = Chem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=1024)
        result = np.zeros(len(fingerprint), np.int32)
        DataStructs.ConvertToNumpyArray(fingerprint, result)
        return MolecularFingerprint(result)
    except Exception:
        print("Fingerprints for a structure cannot be calculated")
        return None
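# Quick check (not in the original script): the helper returns a wrapper whose
# .fingerprint attribute is a 1024-element numpy bit array.
_fp = compute_fingerprint(Chem.MolFromSmiles("c1ccccc1"))
print(_fp.fingerprint.shape, int(_fp.fingerprint.sum()))  # (1024,) and the count of set bits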

def tanimoto_distances_yield(fingerprints, num_fingerprints):
    # Yields row i of the lower-triangular Tanimoto distance matrix,
    # i.e. distances from fingerprint i to fingerprints 0..i-1.
    for i in range(1, num_fingerprints):
        yield [1 - x for x in DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints[:i])]
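# Quick illustration (not in the original script): the generator yields rows of
# growing length 1, 2, ..., n-1, i.e. the lower triangle of the distance matrix.
# The four demo SMILES are arbitrary choices for demonstration.
_demo_fps = [Chem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(s), 2, nBits=1024)
             for s in ["c1ccccc1", "Cc1ccccc1", "CCCCCCCC", "OCCO"]]
print([len(row) for row in tanimoto_distances_yield(_demo_fps, len(_demo_fps))])  # [1, 2, 3]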

def cluster_data(fingerprints, num_points, distance_threshold, reordering=False):
    # Butina-style clustering: build neighbour lists under the distance
    # threshold, then greedily take the point with the most unassigned
    # neighbours as the next cluster centroid.
    nbr_lists = [None] * num_points
    for i in range(num_points):
        nbr_lists[i] = []

    dist_fun = tanimoto_distances_yield(fingerprints, num_points)
    for i in range(1, num_points):
        dists = next(dist_fun)

        for j in range(i):
            dij = dists[j]
            if dij <= distance_threshold:
                nbr_lists[i].append(j)
                nbr_lists[j].append(i)

    # Sort points by neighbour count, largest first
    t_lists = [(len(y), x) for x, y in enumerate(nbr_lists)]
    t_lists.sort(reverse=True)

    res = []
    seen = [0] * num_points
    while t_lists:
        _, idx = t_lists.pop(0)
        if seen[idx]:
            continue
        t_res = [idx]
        for nbr in nbr_lists[idx]:
            if not seen[nbr]:
                t_res.append(nbr)
                seen[nbr] = 1
        if reordering:
            # Refresh neighbour counts of points adjacent to the new cluster
            nbr_nbr = [nbr_lists[t] for t in t_res]
            nbr_nbr = frozenset().union(*nbr_nbr)
            for x, y in enumerate(t_lists):
                y1 = y[1]
                if seen[y1] or (y1 not in nbr_nbr):
                    continue
                nbr_lists[y1] = set(nbr_lists[y1]).difference(t_res)
                t_lists[x] = (len(nbr_lists[y1]), y1)
            t_lists.sort(reverse=True)
        res.append(tuple(t_res))
    return tuple(res)
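# Continuing the illustration (not in the original script): cluster the four
# demo fingerprints with the same 0.56 cutoff used below; the result is a
# tuple of index tuples.
print(cluster_data(_demo_fps, len(_demo_fps), 0.56))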

def cluster_fingerprints(fingerprints, method="Auto"):
    num_fingerprints = len(fingerprints)

    if method == "Auto":
        method = "TB" if num_fingerprints >= 10000 else "Hierarchy"

    if method == "TB":
        cutoff = 0.56
        print("Butina clustering is selected. Dataset size is:", num_fingerprints)
        clusters = cluster_data(fingerprints, num_fingerprints, cutoff)
        # cluster_data already returns groups of indices
        return clusters

    elif method == "Hierarchy":
        import scipy.spatial.distance as ssd
        from scipy.cluster import hierarchy

        print("Hierarchical clustering is selected. Dataset size is:", num_fingerprints)

        av_cluster_size = 8
        dists = []

        # Full pairwise Tanimoto distance matrix
        for i in range(0, num_fingerprints):
            sims = DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints)
            dists.append([1 - x for x in sims])

        dis_array = ssd.squareform(dists)
        Z = hierarchy.linkage(dis_array)
        average_cluster_size = av_cluster_size
        cluster_amount = int(num_fingerprints / average_cluster_size)
        clusters = hierarchy.cut_tree(Z, n_clusters=cluster_amount)

        # cut_tree returns one label per point; regroup into lists of indices
        clusters = list(clusters.transpose()[0])
        cs = []
        for i in range(max(clusters) + 1):
            cs.append([])

        for i in range(len(clusters)):
            cs[clusters[i]].append(i)
        return cs
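# Note (illustration, not in the original script): "Auto" would pick the
# hierarchical branch for a set this small, but with only four fingerprints
# cluster_amount would be zero, so demonstrate the Butina ("TB") path instead.
print(cluster_fingerprints(_demo_fps, method="TB"))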

def split_dataframe(dataframe, smiles_col_index, fraction_to_train, split_for_exact_fraction=True, cluster_method="Auto"):
    try:
        import math

        smiles_column_name = dataframe.columns[smiles_col_index]
        molecule = 'molecule'
        fingerprint = 'fingerprint'
        group = 'group'
        testing = 'testing'

        try:
            PandasTools.AddMoleculeColumnToFrame(dataframe, smiles_column_name, molecule)
        except Exception:
            print("Exception occurred during molecule generation...")

        # Drop rows whose molecule or fingerprint could not be generated
        dataframe = dataframe.loc[dataframe[molecule].notnull()]
        dataframe[fingerprint] = [compute_fingerprint(m) for m in dataframe[molecule]]
        dataframe = dataframe.loc[dataframe[fingerprint].notnull()]

        # Cluster on 2048-bit Morgan fingerprints
        fingerprints = [Chem.GetMorganFingerprintAsBitVect(m, 2, nBits=2048) for m in dataframe[molecule]]
        clusters = cluster_fingerprints(fingerprints, method=cluster_method)

        dataframe.drop([molecule, fingerprint], axis=1, inplace=True)

        last_training_index = int(math.ceil(len(dataframe) * fraction_to_train))
        clustered = None
        cluster_no = 0
        mol_count = 0

        # Assign whole clusters to training until the target fraction is reached
        for cluster in clusters:
            cluster_no = cluster_no + 1
            try:
                one_cluster = dataframe.iloc[list(cluster)].copy()
            except Exception:
                print("Wrong indexes in Cluster: %i, Molecules: %i" % (cluster_no, len(cluster)))
                continue

            one_cluster.loc[:, 'ClusterNo'] = cluster_no
            one_cluster.loc[:, 'MolCount'] = len(cluster)

            if (mol_count < last_training_index) or (cluster_no < 2):
                one_cluster.loc[:, group] = 'training'
            else:
                one_cluster.loc[:, group] = testing

            mol_count += len(cluster)
            clustered = pd.concat([clustered, one_cluster], ignore_index=True)

        if split_for_exact_fraction:
            print("Adjusting test to train ratio. It may split one cluster")
            clustered.loc[last_training_index + 1:, group] = testing

        print("Clustering finished. Training set size is %i, Test set size is %i, Fraction %.2f" %
              (len(clustered.loc[clustered[group] != testing]),
               len(clustered.loc[clustered[group] == testing]),
               len(clustered.loc[clustered[group] == testing]) / len(clustered)))

    except KeyboardInterrupt:
        print("Clustering interrupted.")

    return clustered


def realistic_split(df, smile_col_index, frac_train, split_for_exact_frac=True, cluster_method="Auto"):
    return split_dataframe(df.copy(), smile_col_index, frac_train, split_for_exact_frac, cluster_method=cluster_method)

def split_df_into_train_and_test_sets(df):
    df['group'] = df['group'].str.replace(' ', '_')
    df['group'] = df['group'].str.lower()
    train = df[df['group'] == 'training']
    test = df[df['group'] == 'testing']
    return train, test

#8. Make the test and train datasets

Mutagen = pd.read_csv('Lou2023.csv')
smiles_index = 0  # 'new SMILES' is the first column of Lou2023.csv
realistic = realistic_split(Mutagen.copy(), smiles_index, 0.8, split_for_exact_frac=True, cluster_method="Auto")
realistic_train, realistic_test = split_df_into_train_and_test_sets(realistic)
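# Optional check (not in the original script): report the realized split sizes
# against the requested 0.8 training fraction.
print("train:", len(realistic_train), "test:", len(realistic_test),
      "fraction: %.2f" % (len(realistic_train) / (len(realistic_train) + len(realistic_test))))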

#9. Select columns and name the datasets

selected_columns = realistic_train[['new SMILES', 'ID', 'endpoint', 'MW']]
selected_columns.to_csv("MutagenLou2023_train.csv", index=False)
selected_columns = realistic_test[['new SMILES', 'ID', 'endpoint', 'MW']]
selected_columns.to_csv("MutagenLou2023_test.csv", index=False)
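
# Optional verification (not in the original script): re-read the saved files
# and report how many records survived sanitization and splitting.
_train = pd.read_csv("MutagenLou2023_train.csv")
_test = pd.read_csv("MutagenLou2023_test.csv")
print(len(_train), "train +", len(_test), "test of", len(Mutagen), "input records")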