Update README.md
README.md CHANGED
@@ -41,7 +41,7 @@ configs:
 dataset_info:
 - config_name: train_test
   features:
-  - name:
+  - name: SMILES
     dtype: string
   - name: ID
     dtype: int64
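The `train_test` config declared in the YAML above is the config name passed to `datasets.load_dataset`, which produces the `DatasetDict` inspected in the next hunk. A minimal loading sketch, assuming a hypothetical repository id (the actual repo id is not shown in this diff):

```python
from datasets import load_dataset

# Hypothetical repo id for illustration -- substitute the real dataset repository.
REPO_ID = "your-org/your-dataset"

# "train_test" is the config_name declared in the dataset card's YAML above.
train_test = load_dataset(REPO_ID, "train_test")
```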
@@ -110,11 +110,11 @@ and inspecting the loaded dataset
 >>> train_test
 DatasetDict({
     train: Dataset({
-        features: ['
+        features: ['SMILES', 'ID', 'endpoint', 'MW'],
         num_rows: 6862
     })
     test: Dataset({
-        features: ['
+        features: ['SMILES', 'ID', 'endpoint', 'MW'],
         num_rows: 1714
     })
 })
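The features reported above ('SMILES', 'ID', 'endpoint', 'MW') can then be accessed per split in the usual Hugging Face `datasets` way. A short usage sketch against the `train_test` object shown above (column names are taken from the diff; the printed values are illustrative):

```python
# First training record: a dict with 'SMILES', 'ID', 'endpoint' and 'MW' keys.
print(train_test["train"][0])

# Column access returns a plain list, e.g. the first three SMILES strings.
print(train_test["train"]["SMILES"][:3])

# Row counts should match the repr above: 6862 train / 1714 test.
print(train_test["train"].num_rows, train_test["test"].num_rows)
```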
@@ -141,13 +141,13 @@ then load, featurize, split, fit, and evaluate a catboost model
 
 split_featurised_dataset = featurise_dataset(
     split_dataset,
-    column = "
+    column = "SMILES",
     representations = load_representations_from_dicts([{"name": "morgan"}, {"name": "maccs_rdkit"}]))
 
 model = load_model_from_dict({
     "name": "cat_boost_classifier",
     "config": {
-        "x_features": ['
+        "x_features": ['SMILES::morgan', 'SMILES::maccs_rdkit'],
         "y_features": ['endpoint']}})
 
 model.train(split_featurised_dataset["train"])
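This hunk fills in the `SMILES` column for featurisation and the derived `SMILES::morgan` / `SMILES::maccs_rdkit` feature columns for the CatBoost classifier. The helper functions look like the molflux API, but the diff shows neither the imports, how `split_dataset` is built, nor the evaluation step, so the completion below is a sketch under assumptions: the import paths, the reuse of the loaded `train_test` as `split_dataset`, and the `predict` call are inferred rather than taken from this diff.

```python
# Assumed import paths (not shown in the diff) -- adjust to the library actually used.
from molflux.datasets import featurise_dataset
from molflux.features import load_representations_from_dicts
from molflux.modelzoo import load_model_from_dict

# Assumption: reuse the train/test DatasetDict loaded earlier as the split dataset.
split_dataset = train_test

# Featurise the SMILES column with Morgan and MACCS fingerprints, as in the diff.
split_featurised_dataset = featurise_dataset(
    split_dataset,
    column="SMILES",
    representations=load_representations_from_dicts(
        [{"name": "morgan"}, {"name": "maccs_rdkit"}]))

# CatBoost classifier trained on the fingerprint columns against the endpoint label.
model = load_model_from_dict({
    "name": "cat_boost_classifier",
    "config": {
        "x_features": ["SMILES::morgan", "SMILES::maccs_rdkit"],
        "y_features": ["endpoint"]}})

model.train(split_featurised_dataset["train"])

# Assumed evaluation step: modelzoo-style models typically expose a predict method.
predictions = model.predict(split_featurised_dataset["test"])
```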