woshixuhao committed
Commit 6df09d4 · 1 Parent(s): e874b08

Upload 2 files

Files changed (2)
  1. app.py +267 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,267 @@
+ import argparse
+ import os
+ from rdkit import Chem
+ import sys
+ import joblib
+ # scikit-learn removed sklearn.externals.joblib; registering the standalone joblib module
+ # under that name lets the legacy import below keep working.
+ sys.modules['sklearn.externals.joblib'] = joblib
+ from sklearn.externals import joblib
+ import numpy as np
+ from rdkit.Chem import Descriptors
+ from rdkit.Chem import rdMolDescriptors
+ from rdkit.Chem import MACCSkeys
+ from xgboost.sklearn import XGBClassifier, XGBRegressor
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.autograd import Variable
+ import lightgbm as lgb
+ from sklearn.ensemble import RandomForestRegressor
+ import wget
+ import warnings
+ import gradio as gr
+ warnings.filterwarnings("ignore")
+
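+ # SMILES of the five eluent components, in the order used throughout the app:
+ # n-hexane (petroleum ether), ethyl acetate, dichloromethane, methanol, diethyl ether.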
+ Eluent_smiles = ['CCCCCC', 'CC(OCC)=O', 'C(Cl)Cl', 'CO', 'CCOCC']
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--file_path', type=str, default=os.path.join(os.getcwd(), 'TLC_dataset.xlsx'),
+                         help='path of the downloaded dataset')
+     parser.add_argument('--dipole_path', type=str, default=os.path.join(os.getcwd(), 'compound_list_带化合物分类.xlsx'),
+                         help='path of the dipole file')
+     parser.add_argument('--data_range', type=int, default=4944,
+                         help='utilized data range; robot: 4114, manual: 4458, new: 4944')
+     # Note: argparse's type=bool turns any non-empty string into True; the boolean options
+     # below are only used through their defaults in this app.
+     parser.add_argument('--automatic_divide', type=bool, default=False,
+                         help='automatically split the dataset 80/10/10 into train/validate/test')
+     parser.add_argument('--choose_total', type=int, default=387, help='total train num; robot: 387, manual: 530')
+     parser.add_argument('--choose_train', type=int, default=308, help='train num; robot: 387, manual: 530')
+     parser.add_argument('--choose_validate', type=int, default=38, help='validate num')
+     parser.add_argument('--choose_test', type=int, default=38, help='test num')
+     parser.add_argument('--seed', type=int, default=324, help='random seed for splitting the dataset')
+     parser.add_argument('--torch_seed', type=int, default=324, help='random seed for torch')
+     parser.add_argument('--add_dipole', type=bool, default=True, help='add dipole into the dataset')
+     parser.add_argument('--add_molecular_descriptors', type=bool, default=True,
+                         help='add molecular descriptors (molecular weight (MW), topological polar surface area (TPSA), '
+                              'number of rotatable bonds (NROTB), H-bond donors (HBD), H-bond acceptors (HBA), LogP) '
+                              'into the dataset')
+     parser.add_argument('--add_MACCkeys', type=bool, default=True, help='add MACCS keys into the dataset')
+     parser.add_argument('--add_eluent_matrix', type=bool, default=True, help='add the eluent matrix into the dataset')
+     parser.add_argument('--test_mode', type=str, default='robot',
+                         help='test data source: manual, robot, or fix (custom test data)')
+     parser.add_argument('--use_model', type=str, default='Ensemble',
+                         help='the utilized model (XGB, LGB, ANN, RF, Ensemble, Bayesian)')
+     parser.add_argument('--download_data', type=bool, default=False,
+                         help='download the dataset instead of using a local copy')
+     parser.add_argument('--use_sigmoid', type=bool, default=True, help='use sigmoid')
+     parser.add_argument('--shuffle_array', type=bool, default=True, help='shuffle_array')
+     parser.add_argument('--characterization_mode', type=str, default='standard',
+                         help='the characterization mode for the dataset: standard, precise_TPSA, or no_multi')
+
+     # --------------- parameters for plotting ---------------------
+     parser.add_argument('--plot_col_num', type=int, default=4, help='the col_num in plot')
+     parser.add_argument('--plot_row_num', type=int, default=4, help='the row_num in plot')
+     parser.add_argument('--plot_importance_num', type=int, default=10, help='the max importance num in plot')
+
+     # -------------- parameters for LGB -------------------
+     parser.add_argument('--LGB_max_depth', type=int, default=5, help='max_depth for LGB')
+     parser.add_argument('--LGB_num_leaves', type=int, default=25, help='num_leaves for LGB')
+     parser.add_argument('--LGB_learning_rate', type=float, default=0.007, help='learning_rate for LGB')
+     parser.add_argument('--LGB_n_estimators', type=int, default=1000, help='n_estimators for LGB')
+     parser.add_argument('--LGB_early_stopping_rounds', type=int, default=200, help='early_stopping_rounds for LGB')
+
+     # --------------- parameters for XGB -----------------------
+     parser.add_argument('--XGB_n_estimators', type=int, default=200, help='n_estimators for XGB')
+     parser.add_argument('--XGB_max_depth', type=int, default=3, help='max_depth for XGB')
+     parser.add_argument('--XGB_learning_rate', type=float, default=0.1, help='learning_rate for XGB')
+
+     # --------------- parameters for RF ------------------------
+     parser.add_argument('--RF_n_estimators', type=int, default=1000, help='n_estimators for RF')
+     parser.add_argument('--RF_random_state', type=int, default=1, help='random_state for RF')
+     parser.add_argument('--RF_n_jobs', type=int, default=1, help='n_jobs for RF')
+
+     # -------------- parameters for ANN -----------------------
+     parser.add_argument('--NN_hidden_neuron', type=int, default=128, help='hidden neurons for NN')
+     parser.add_argument('--NN_optimizer', type=str, default='Adam', help='optimizer for NN (Adam, SGD, RMSprop)')
+     parser.add_argument('--NN_lr', type=float, default=0.005, help='learning rate for NN')
+     parser.add_argument('--NN_model_save_location', type=str, default=os.path.join(os.getcwd(), 'model_save_NN'),
+                         help='save location for the NN model')
+     parser.add_argument('--NN_max_epoch', type=int, default=5000, help='max training epochs for NN')
+     parser.add_argument('--NN_add_sigmoid', type=bool, default=True, help='whether to add a sigmoid in NN')
+     parser.add_argument('--NN_add_PINN', type=bool, default=False, help='whether to add PINN in NN')
+     parser.add_argument('--NN_epi', type=float, default=100.0, help='the coefficient of the PINN loss in NN')
+
+     config = parser.parse_args()
+     config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+     return config
+
+ class ANN(nn.Module):
+     '''
+     Construct the artificial neural network used for Rf regression.
+     '''
+     def __init__(self, in_neuron, hidden_neuron, out_neuron, config):
+         super(ANN, self).__init__()
+         self.input_layer = nn.Linear(in_neuron, hidden_neuron)
+         self.hidden_layer = nn.Linear(hidden_neuron, hidden_neuron)
+         self.output_layer = nn.Linear(hidden_neuron, out_neuron)
+         self.NN_add_sigmoid = config.NN_add_sigmoid
+
+     def forward(self, x):
+         x = self.input_layer(x)
+         x = F.leaky_relu(x)
+         # The same hidden layer is applied three times (weight sharing), matching the
+         # architecture of the saved checkpoint.
+         x = self.hidden_layer(x)
+         x = F.leaky_relu(x)
+         x = self.hidden_layer(x)
+         x = F.leaky_relu(x)
+         x = self.hidden_layer(x)
+         x = F.leaky_relu(x)
+         x = self.output_layer(x)
+         if self.NN_add_sigmoid:
+             x = torch.sigmoid(x)  # torch.sigmoid replaces the deprecated F.sigmoid
+         return x
+
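+ # Model_ML wraps the four pretrained regressors (LightGBM, XGBoost, random forest, ANN):
+ # load_model() fetches their weights from the Hugging Face Hub, and get_Rf() combines
+ # their predictions into a single ensemble estimate of Rf.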
+ class Model_ML():
+     def __init__(self, config, X_test):
+         super(Model_ML, self).__init__()
+         self.X_test = X_test
+         self.seed = config.seed
+         self.torch_seed = config.torch_seed
+         self.config = config
+         self.add_dipole = config.add_dipole
+         self.add_molecular_descriptors = config.add_molecular_descriptors
+         self.add_eluent_matrix = config.add_eluent_matrix
+         self.use_sigmoid = config.use_sigmoid
+
+         self.use_model = config.use_model
+         self.LGB_max_depth = config.LGB_max_depth
+         self.LGB_num_leaves = config.LGB_num_leaves
+         self.LGB_learning_rate = config.LGB_learning_rate
+         self.LGB_n_estimators = config.LGB_n_estimators
+         self.LGB_early_stopping_rounds = config.LGB_early_stopping_rounds
+
+         self.XGB_n_estimators = config.XGB_n_estimators
+         self.XGB_max_depth = config.XGB_max_depth
+         self.XGB_learning_rate = config.XGB_learning_rate
+
+         self.RF_n_estimators = config.RF_n_estimators
+         self.RF_random_state = config.RF_random_state
+         self.RF_n_jobs = config.RF_n_jobs
+
+         self.NN_hidden_neuron = config.NN_hidden_neuron
+         self.NN_optimizer = config.NN_optimizer
+         self.NN_lr = config.NN_lr
+         self.NN_model_save_location = config.NN_model_save_location
+         self.NN_max_epoch = config.NN_max_epoch
+         self.NN_add_PINN = config.NN_add_PINN
+         self.NN_epi = config.NN_epi
+         self.device = config.device
+
+         self.plot_row_num = config.plot_row_num
+         self.plot_col_num = config.plot_col_num
+         self.plot_importance_num = config.plot_importance_num
+
+     def load_model(self):
+         # The estimators constructed below only define architectures and defaults; the
+         # trained models are downloaded from the Hugging Face Hub and loaded afterwards.
+         model_LGB = lgb.LGBMRegressor(objective='regression', max_depth=self.LGB_max_depth,
+                                       num_leaves=self.LGB_num_leaves,
+                                       learning_rate=self.LGB_learning_rate, n_estimators=self.LGB_n_estimators)
+         model_XGB = XGBRegressor(seed=self.seed,
+                                  n_estimators=self.XGB_n_estimators,
+                                  max_depth=self.XGB_max_depth,
+                                  eval_metric='rmse',
+                                  learning_rate=self.XGB_learning_rate,
+                                  min_child_weight=1,
+                                  subsample=1,
+                                  colsample_bytree=1,
+                                  colsample_bylevel=1,
+                                  gamma=0)
+
+         model_RF = RandomForestRegressor(n_estimators=self.RF_n_estimators,
+                                          criterion='squared_error',  # 'mse' was removed in scikit-learn 1.2
+                                          random_state=self.RF_random_state,
+                                          n_jobs=self.RF_n_jobs)
+
+         Net = ANN(self.X_test.shape[1], self.NN_hidden_neuron, 1, config=self.config).to(self.device)
+
+         for file_name in ['model_LGB.pkl', 'model_XGB.pkl', 'model_RF.pkl', 'model_ANN.pkl']:
+             if not os.path.exists(file_name):  # avoid re-downloading on every prediction request
+                 wget.download('https://huggingface.co/woshixuhao/Rf_prediction/resolve/main/' + file_name)
+         model_LGB = joblib.load('model_LGB.pkl')
+         model_XGB = joblib.load('model_XGB.pkl')
+         model_RF = joblib.load('model_RF.pkl')
+         Net.load_state_dict(torch.load('model_ANN.pkl', map_location=self.device))
+         return model_LGB, model_XGB, model_RF, Net
+
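+     # Ensemble prediction: when use_sigmoid is set, each tree model's raw output is passed
+     # through a sigmoid (the tree models were presumably trained on logit-transformed Rf),
+     # then the four predictions are averaged with weights 0.2/0.2/0.2/0.4 (ANN weighted highest).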
+     def get_Rf(self):
+         model_LGB, model_XGB, model_RF, model_ANN = self.load_model()
+
+         X_test_ANN = Variable(torch.from_numpy(self.X_test.astype(np.float32)).to(self.device), requires_grad=True)
+         y_pred_ANN = model_ANN(X_test_ANN).cpu().data.numpy()
+         y_pred_ANN = y_pred_ANN.reshape(y_pred_ANN.shape[0], )
+
+         y_pred_XGB = model_XGB.predict(self.X_test)
+         if self.use_sigmoid:
+             y_pred_XGB = 1 / (1 + np.exp(-y_pred_XGB))
+
+         y_pred_LGB = model_LGB.predict(self.X_test)
+         if self.use_sigmoid:
+             y_pred_LGB = 1 / (1 + np.exp(-y_pred_LGB))
+
+         y_pred_RF = model_RF.predict(self.X_test)
+         if self.use_sigmoid:
+             y_pred_RF = 1 / (1 + np.exp(-y_pred_RF))
+
+         y_pred = 0.2 * y_pred_LGB + 0.2 * y_pred_XGB + 0.2 * y_pred_RF + 0.4 * y_pred_ANN
+         return y_pred
+
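+ # get_descriptor() computes six RDKit descriptors (MW, TPSA, rotatable bonds, HBD, HBA, LogP)
+ # for a single eluent component and scales them by its volume ratio; get_eluent_descriptor()
+ # sums these contributions over all non-zero components into one 6-dimensional eluent descriptor.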
+ def get_descriptor(smiles, ratio):
+     compound_mol = Chem.MolFromSmiles(smiles)
+     descriptor = []
+     descriptor.append(Descriptors.ExactMolWt(compound_mol))
+     descriptor.append(Chem.rdMolDescriptors.CalcTPSA(compound_mol))
+     descriptor.append(Descriptors.NumRotatableBonds(compound_mol))  # number of rotatable bonds
+     descriptor.append(Descriptors.NumHDonors(compound_mol))  # number of H-bond donors
+     descriptor.append(Descriptors.NumHAcceptors(compound_mol))  # number of H-bond acceptors
+     descriptor.append(Descriptors.MolLogP(compound_mol))  # LogP
+     descriptor = np.array(descriptor) * ratio
+     return descriptor
+
+
+ def get_eluent_descriptor(eluent):
+     eluent = np.array(eluent)
+     des = np.zeros([6, ])
+     for i in range(eluent.shape[0]):
+         if eluent[i] != 0:
+             e_descriptors = get_descriptor(Eluent_smiles[i], eluent[i])
+             des += e_descriptors
+     return des
+
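+ # Feature vector layout (179 columns): [0:167] MACCS fingerprint of the compound,
+ # [167:173] the six eluent descriptors, [173:179] the six molecular descriptors of the compound.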
+ def get_data_from_smile(smile, eluent_list):
+     compound_mol = Chem.MolFromSmiles(smile)
+     Finger = MACCSkeys.GenMACCSKeys(compound_mol)
+     compound_finger = np.array([x for x in Finger])
+     compound_MolWt = Descriptors.ExactMolWt(compound_mol)
+     compound_TPSA = Chem.rdMolDescriptors.CalcTPSA(compound_mol)
+     compound_nRotB = Descriptors.NumRotatableBonds(compound_mol)  # number of rotatable bonds
+     compound_HBD = Descriptors.NumHDonors(compound_mol)  # number of H-bond donors
+     compound_HBA = Descriptors.NumHAcceptors(compound_mol)  # number of H-bond acceptors
+     compound_LogP = Descriptors.MolLogP(compound_mol)  # LogP
+
+     X_test = np.zeros([1, 179])
+     X_test[0, 0:167] = compound_finger
+     X_test[0, 167:173] = get_eluent_descriptor(eluent_list)
+     X_test[0, 173:179] = [compound_MolWt, compound_TPSA, compound_nRotB, compound_HBD, compound_HBA, compound_LogP]
+
+     return X_test
+
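+ # Gradio entry point: takes a SMILES string and the volume fractions of the five eluent
+ # components (PE, EA, DCM, MeOH, Et2O) and returns the predicted Rf; the dipole feature is
+ # switched off because it is not part of the 179-column input built above.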
+ def predict_single(smile, PE, EA, DCM, MeOH, Et2O):
+     config = parse_args()
+     config.add_dipole = False
+     eluent_list = [PE, EA, DCM, MeOH, Et2O]
+     X_test = get_data_from_smile(smile, eluent_list)
+     Model = Model_ML(config, X_test)
+     Rf = Model.get_Rf()
+     return Rf[0]
+
+
+ if __name__ == '__main__':
+     demo = gr.Interface(fn=predict_single,
+                         inputs=["text", "number", "number", "number", "number", "number"],
+                         outputs='number')
+     demo.launch(share=True)
+     # Local test example:
+     # smile = 'O=C(OC1C(OC(C)=O)C(OC(C)=O)C(OC(C)=O)C(COC(C)=O)O1)C'
+     # eluent = [0, 0.9, 0, 0, 0]
+     # print(predict_single(smile, 1, 0, 0, 0, 0))
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ gradio==3.29.0
+ joblib==1.2.0
+ lightgbm==3.3.5
+ numpy==1.24.3
+ rdkit==2023.3.1
+ scikit_learn==1.2.2
+ torch==2.0.1
+ wget==3.2
+ xgboost==1.7.5