## Script to sanitize and split the MutagenLou2023 dataset

#1. Import modules

# Install dependencies first if needed (shell command, not Python):
#   pip install rdkit molvs

import pandas as pd
import numpy as np
import tqdm
import rdkit
from rdkit import Chem
import molvs

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
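
# A one-off illustration (example SMILES of our own, not from the dataset) of
# what these two objects do: the standardizer normalizes the structure and the
# fragment remover strips common counterions. Assuming typical MolVS behavior,
# the sodium counterion below is removed, leaving the benzoate anion.
example = rdkit.Chem.MolFromSmiles('[Na+].O=C([O-])c1ccccc1')
print(rdkit.Chem.MolToSmiles(fragment_remover.remove(standardizer.standardize(example))))
# expected: O=C([O-])c1ccccc1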

#2. Import a dataset

# Download 'ames_data.csv' in the paper
#.  Chemical rules for optimization of chemical mutagenicity via matched molecular pairs analysis and machine learning methods
#.  Chaofeng Lou, Hongbin Yang, Hua Deng, Mengting Huang, Weihua Li, Guixia Liu, Philip W. Lee & Yun Tang
#.  https://github.com/Louchaofeng/Ames-mutagenicity-optimization/blob/main/data/ames_data.csv
Lou2023 = pd.read_csv("ames_data.csv")
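
# Quick sanity check; the column names 'smiles', 'ID', 'endpoint' and 'MW'
# used below come from this CSV.
print(Lou2023.shape)
print(Lou2023.columns.tolist())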

#3. Resolve a SMILES parse error

# RDKit rejects the hypervalent 'O=Br' group in one original record, so
# replace it with the equivalent charge-separated form.
Lou2023.loc[Lou2023['smiles'] == 'O=Brc1ccc(\\C=C\\C(=O)c2ccccc2)cc1', 'smiles'] = "[O-][Br+]c1ccc(\\C=C\\C(=O)c2ccccc2)cc1"
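
# Quick check that the replacement form parses (the original raises a valence
# error in RDKit):
assert rdkit.Chem.MolFromSmiles("[O-][Br+]c1ccc(\\C=C\\C(=O)c2ccccc2)cc1") is not None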

#4. Sanitize with MolVS and print problems

Lou2023['X'] = [
    rdkit.Chem.MolToSmiles(
        fragment_remover.remove(
            standardizer.standardize(
                rdkit.Chem.MolFromSmiles(smiles))))
    for smiles in Lou2023['smiles']]

problems = []
for index, row in tqdm.tqdm(Lou2023.iterrows(), total=len(Lou2023)):
    result = molvs.validate_smiles(row['X'])
    if len(result) == 0:
        continue
    problems.append((row['ID'], result))

#   Most problems arise because a record contains a salt form and/or is not neutralized
for id, alert in problems:
    print(f"ID: {id}, problem: {alert[0]}")

# Result interpretation

# - "WARNING: not removing hydrogen atom without neighbors": the molecule
#   contains hydrogen atoms that are not bonded to any other atom.

# - "Can't kekulize mol": kekulization failed for this molecule, so that
#   normalization step could not proceed. It does not mean the molecule is
#   invalid, only that normalization was skipped.

# - "Unusual charge on atom 0 number of radical electrons set to zero": when
#   the computed valence comes out negative, RDKit sets the radical electron
#   count to zero and emits this warning.
#   https://github.com/rdkit/rdkit/blob/d6171aaade470100605f3e99918d31cd46a09199/Code/GraphMol/MolOps.cpp#L451

# - "Aborted reionization due to unexpected situation": MolVS refuses to move
#   an H when ppos < ipos and poccur[-1] == ioccur[-1].
#   https://github.com/mcs07/MolVS/blob/d815fe52d160abcecbcbf117e6437bf727dbd8ad/molvs/charge.py#L206

# - "<name> is present" (e.g. "benzene is present"): not a salt or fragment
#   error as such; the validator is simply reporting that a known common
#   fragment occurs in the record.
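
# For reference, a minimal illustration of the validator's output format (our
# own example SMILES; exact message wording may vary across MolVS versions).
# An empty list means no issues were found:
print(molvs.validate_smiles('O=C([O-])c1ccccc1'))
# e.g. ['INFO: [NeutralValidation] Not an overall neutral system (-1)']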

#5. Select columns and rename the dataset

Lou2023.rename(columns={'X': 'new SMILES'}, inplace=True)
newLou2023 = Lou2023[['new SMILES', 'ID', 'endpoint', 'MW']]

#6. Import modules to split the dataset

import math
import scipy.spatial.distance as ssd   # needed by hierarchical_cluster below
from scipy.cluster import hierarchy    # needed by hierarchical_cluster below
from rdkit import DataStructs
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import PandasTools

#7. Split the dataset into train and test

class MolecularFingerprint:
    def __init__(self, fingerprint):
        self.fingerprint = fingerprint

    def __str__(self):
        return self.fingerprint.__str__()

def compute_fingerprint(molecule):
    try:
        fingerprint = Chem.GetMorganFingerprintAsBitVect(molecule, 2, nBits=1024)
        result = np.zeros(len(fingerprint), np.int32)
        DataStructs.ConvertToNumpyArray(fingerprint, result)
        return MolecularFingerprint(result)
    except Exception:
        print("Fingerprint for a structure cannot be calculated")
        return None

def tanimoto_distances_yield(fingerprints, num_fingerprints):
    # Yield, one row at a time, the lower triangle of the Tanimoto distance matrix
    for i in range(1, num_fingerprints):
        yield [1 - x for x in DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints[:i])]

def butina_cluster(fingerprints, num_points, distance_threshold, reordering=False):
    # Taylor-Butina clustering: pick centroids in order of decreasing
    # neighbour count; every unassigned neighbour within distance_threshold
    # joins the centroid's cluster.
    nbr_lists = [[] for _ in range(num_points)]

    dist_fun = tanimoto_distances_yield(fingerprints, num_points)
    for i in range(1, num_points):
        dists = next(dist_fun)

        for j in range(i):
            dij = dists[j]
            if dij <= distance_threshold:
                nbr_lists[i].append(j)
                nbr_lists[j].append(i)

    t_lists = [(len(y), x) for x, y in enumerate(nbr_lists)]
    t_lists.sort(reverse=True)

    res = []
    seen = [0] * num_points
    while t_lists:
        _, idx = t_lists.pop(0)
        if seen[idx]:
            continue
        t_res = [idx]
        for nbr in nbr_lists[idx]:
            if not seen[nbr]:
                t_res.append(nbr)
                seen[nbr] = 1
        if reordering:
            nbr_nbr = [nbr_lists[t] for t in t_res]
            nbr_nbr = frozenset().union(*nbr_nbr)
            for x, y in enumerate(t_lists):
                y1 = y[1]
                if seen[y1] or (y1 not in nbr_nbr):
                    continue
                nbr_lists[y1] = set(nbr_lists[y1]).difference(t_res)
                t_lists[x] = (len(nbr_lists[y1]), y1)
            t_lists.sort(reverse=True)
        res.append(tuple(t_res))
    return tuple(res)

def hierarchical_cluster(fingerprints):
    num_fingerprints = len(fingerprints)
    av_cluster_size = 8

    # Full pairwise Tanimoto distance matrix
    dists = []
    for i in range(num_fingerprints):
        sims = DataStructs.BulkTanimotoSimilarity(fingerprints[i], fingerprints)
        dists.append([1 - x for x in sims])

    dis_array = ssd.squareform(dists)
    Z = hierarchy.linkage(dis_array)
    cluster_amount = int(num_fingerprints / av_cluster_size)
    clusters = hierarchy.cut_tree(Z, n_clusters=cluster_amount)

    # Convert the flat cluster labels into lists of row indices per cluster
    clusters = list(clusters.transpose()[0])
    cs = [[] for _ in range(max(clusters) + 1)]
    for i, label in enumerate(clusters):
        cs[label].append(i)
    return cs

def cluster_fingerprints(fingerprints, method="Auto"):
    num_fingerprints = len(fingerprints)

    if method == "Auto":
        method = "TB" if num_fingerprints >= 10000 else "Hierarchy"

    if method == "TB":
        cutoff = 0.56
        print("Butina clustering is selected. Dataset size is:", num_fingerprints)
        clusters = butina_cluster(fingerprints, num_fingerprints, cutoff)

    elif method == "Hierarchy":
        print("Hierarchical clustering is selected. Dataset size is:", num_fingerprints)
        clusters = hierarchical_cluster(fingerprints)

    return clusters
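
# A tiny, self-contained usage sketch (toy SMILES of our own choosing, not
# from the dataset): the result is a sequence of clusters, each holding row
# indices into the fingerprint list.
toy_fps = [Chem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(s), 2, nBits=2048)
           for s in ('CCO', 'CCCO', 'c1ccccc1')]
print(cluster_fingerprints(toy_fps, method="TB"))  # prints a tuple of index tuples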

def split_dataframe(dataframe, smiles_col_index, fraction_to_train, split_for_exact_fraction=True, cluster_method="Auto"):
    clustered = None  # initialized up front so an early interrupt cannot leave it unbound
    try:
        smiles_column_name = dataframe.columns[smiles_col_index]
        molecule = 'molecule'
        fingerprint = 'fingerprint'
        group = 'group'
        testing = 'testing'

        try:
            PandasTools.AddMoleculeColumnToFrame(dataframe, smiles_column_name, molecule)
        except Exception:
            print("Exception occurred during molecule generation...")

        dataframe = dataframe.loc[dataframe[molecule].notnull()]
        dataframe[fingerprint] = [compute_fingerprint(m) for m in dataframe[molecule]]
        dataframe = dataframe.loc[dataframe[fingerprint].notnull()]

        # Recompute 2048-bit fingerprints for clustering (the 1024-bit column
        # above is only used to drop structures that fail fingerprinting)
        fingerprints = [Chem.GetMorganFingerprintAsBitVect(m, 2, nBits=2048) for m in dataframe[molecule]]
        clusters = cluster_fingerprints(fingerprints, method=cluster_method)

        dataframe.drop([molecule, fingerprint], axis=1, inplace=True)

        last_training_index = int(math.ceil(len(dataframe) * fraction_to_train))
        clustered = None
        cluster_no = 0
        mol_count = 0

        for cluster in clusters:
            cluster_no = cluster_no + 1
            try:
                one_cluster = dataframe.iloc[list(cluster)].copy()
            except Exception:
                print("Wrong indexes in Cluster: %i, Molecules: %i" % (cluster_no, len(cluster)))
                continue

            one_cluster.loc[:, 'ClusterNo'] = cluster_no
            one_cluster.loc[:, 'MolCount'] = len(cluster)

            if (mol_count < last_training_index) or (cluster_no < 2):
                one_cluster.loc[:, group] = 'training'
            else:
                one_cluster.loc[:, group] = testing

            mol_count += len(cluster)
            clustered = pd.concat([clustered, one_cluster], ignore_index=True)

        if split_for_exact_fraction:
            print("Adjusting test to train ratio. It may split one cluster")
            clustered.loc[last_training_index + 1:, group] = testing

        print("Clustering finished. Training set size is %i, Test set size is %i, Fraction %.2f" %
              (len(clustered.loc[clustered[group] != testing]),
               len(clustered.loc[clustered[group] == testing]),
               len(clustered.loc[clustered[group] == testing]) / len(clustered)))

    except KeyboardInterrupt:
        print("Clustering interrupted.")

    return clustered


def realistic_split(df, smile_col_index, frac_train, split_for_exact_frac=True, cluster_method="Auto"):
    return split_dataframe(df.copy(), smile_col_index, frac_train, split_for_exact_frac, cluster_method=cluster_method)

def split_df_into_train_and_test_sets(df):
    df['group'] = df['group'].str.replace(' ', '_')
    df['group'] = df['group'].str.lower()
    train = df[df['group'] == 'training']
    test = df[df['group'] == 'testing']
    return train, test

#8. Build the train and test datasets

smiles_index = 0  # 'new SMILES' is the first column of newLou2023
realistic = realistic_split(newLou2023.copy(), smiles_index, 0.8, split_for_exact_frac=True, cluster_method="Auto")
realistic_train, realistic_test = split_df_into_train_and_test_sets(realistic)

#9. Select columns and save the datasets

selected_columns = realistic_train[['new SMILES', 'ID', 'endpoint', 'MW']]
selected_columns.to_csv("MutagenLou2023_train.csv", index=False)
selected_columns = realistic_test[['new SMILES', 'ID', 'endpoint', 'MW']]
selected_columns.to_csv("MutagenLou2023_test.csv", index=False)