"""
KAGGLE dataset loader.
"""
import os
import logging
import time

import numpy as np
import deepchem
from deepchem.molnet.load_function.kaggle_features import merck_descriptors

logger = logging.getLogger(__name__)


def remove_missing_entries(dataset):
  """Remove missing entries.

  Some of the datasets have missing entries that sneak in as zero'd out
  feature vectors. Get rid of them.
  """
  for i, (X, y, w, ids) in enumerate(dataset.itershards()):
    available_rows = X.any(axis=1)
    logger.info("Shard %d has %d missing entries." %
                (i, np.count_nonzero(~available_rows)))
    X = X[available_rows]
    y = y[available_rows]
    w = w[available_rows]
    ids = ids[available_rows]
    dataset.set_shard(i, X, y, w, ids)
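

# A hypothetical sketch of `remove_missing_entries` in action. The helper
# `DiskDataset.from_numpy` exists in DeepChem; the toy arrays below are
# illustrative only and are not part of the Kaggle data:
#
#   X = np.array([[1.0, 2.0], [0.0, 0.0]])  # second row is zero'd out
#   y = np.array([[0.5], [0.7]])
#   dataset = deepchem.data.DiskDataset.from_numpy(X, y)
#   remove_missing_entries(dataset)  # shard 0 now keeps only the first row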


def get_transformers(train_dataset):
  """Get transformers applied to datasets."""
  transformers = []
  #transformers = [
  #    deepchem.trans.LogTransformer(transform_X=True),
  #    deepchem.trans.NormalizationTransformer(transform_y=True,
  #                                            dataset=train_dataset)]
  return transformers


# Set shard size low to avoid memory problems.
def gen_kaggle(KAGGLE_tasks,
               train_dir,
               valid_dir,
               test_dir,
               data_dir,
               shard_size=2000):
  """Load KAGGLE datasets. Does not do train/test split"""
  ############################################################## TIMING
  time1 = time.time()
  ############################################################## TIMING
  # Set some global variables up top
  train_files = os.path.join(data_dir,
                             "KAGGLE_training_disguised_combined_full.csv.gz")
  valid_files = os.path.join(data_dir,
                             "KAGGLE_test1_disguised_combined_full.csv.gz")
  test_files = os.path.join(data_dir,
                            "KAGGLE_test2_disguised_combined_full.csv.gz")
  if not os.path.exists(train_files):
    deepchem.utils.download_url(
        'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/KAGGLE_training_disguised_combined_full.csv.gz',
        dest_dir=data_dir)
    deepchem.utils.download_url(
        'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/KAGGLE_test1_disguised_combined_full.csv.gz',
        dest_dir=data_dir)
    deepchem.utils.download_url(
        'http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/KAGGLE_test2_disguised_combined_full.csv.gz',
        dest_dir=data_dir)

  # Featurize KAGGLE dataset
  logger.info("About to featurize KAGGLE dataset.")
  featurizer = deepchem.feat.UserDefinedFeaturizer(merck_descriptors)
  loader = deepchem.data.UserCSVLoader(
      tasks=KAGGLE_tasks, id_field="Molecule", featurizer=featurizer)

  logger.info("Featurizing train datasets")
  train_dataset = loader.featurize(train_files, shard_size=shard_size)

  logger.info("Featurizing valid datasets")
  valid_dataset = loader.featurize(valid_files, shard_size=shard_size)

  logger.info("Featurizing test datasets")
  test_dataset = loader.featurize(test_files, shard_size=shard_size)

  logger.info("Remove missing entries from datasets.")
  remove_missing_entries(train_dataset)
  remove_missing_entries(valid_dataset)
  remove_missing_entries(test_dataset)

  logger.info("Shuffling order of train dataset.")
  train_dataset.sparse_shuffle()

  logger.info("Transforming datasets with transformers.")
  transformers = get_transformers(train_dataset)
  for transformer in transformers:
    logger.info(
        "Performing transformations with %s" % transformer.__class__.__name__)
    logger.info("Transforming datasets")
    train_dataset = transformer.transform(train_dataset)
    valid_dataset = transformer.transform(valid_dataset)
    test_dataset = transformer.transform(test_dataset)

  logger.info("Moving directories")
  train_dataset.move(train_dir)
  valid_dataset.move(valid_dir)
  test_dataset.move(test_dir)

  ############################################################## TIMING
  time2 = time.time()
  logger.info("TIMING: KAGGLE fitting took %0.3f s" % (time2 - time1))
  ############################################################## TIMING

  return train_dataset, valid_dataset, test_dataset


def load_kaggle(shard_size=2000, featurizer=None, split=None, reload=True):
  """Loads Kaggle datasets. Generates if not stored already.

  The Kaggle dataset is an in-house dataset from Merck that was first
  introduced in the following paper:

  Ma, Junshui, et al. "Deep neural nets as a method for quantitative
  structure–activity relationships." Journal of Chemical Information and
  Modeling 55.2 (2015): 263-274.

  It contains 100,000 unique Merck in-house compounds that were measured
  on 15 enzyme inhibition and ADME/TOX datasets. Unlike most of the other
  datasets featured in MoleculeNet, the Kaggle collection does not have
  structures for the compounds tested, since they were proprietary Merck
  compounds. However, the collection does feature pre-computed descriptors
  for these compounds.

  Note that the original train/valid/test split from the source data was
  preserved here, so this function doesn't allow for alternate modes of
  splitting. Similarly, since the source data came pre-featurized, it is
  not possible to apply alternative featurizations.

  Parameters
  ----------
  shard_size: int, optional
    Size of the DiskDataset shards to write on disk.
  featurizer: optional
    Ignored since featurization is pre-computed.
  split: optional
    Ignored since the split is pre-computed.
  reload: bool, optional
    Whether to automatically re-load from disk.
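
  Examples
  --------
  A minimal usage sketch (skipped under doctest, since the first call
  downloads and featurizes several hundred MB of data):

  >>> tasks, datasets, transformers = load_kaggle()  # doctest: +SKIP
  >>> train, valid, test = datasets  # doctest: +SKIP
  >>> len(tasks)  # doctest: +SKIP
  15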
"""
  KAGGLE_tasks = [
      '3A4', 'CB1', 'DPP4', 'HIVINT', 'HIV_PROT', 'LOGD', 'METAB', 'NK1',
      'OX1', 'OX2', 'PGP', 'PPB', 'RAT_F', 'TDI', 'THROMBIN'
  ]
  data_dir = deepchem.utils.get_data_dir()
  data_dir = os.path.join(data_dir, "kaggle")
  if not os.path.exists(data_dir):
    os.mkdir(data_dir)

  train_dir = os.path.join(data_dir, "train_dir")
  valid_dir = os.path.join(data_dir, "valid_dir")
  test_dir = os.path.join(data_dir, "test_dir")
  if (os.path.exists(train_dir) and os.path.exists(valid_dir) and
      os.path.exists(test_dir)):
    logger.info("Reloading existing datasets")
    train_dataset = deepchem.data.DiskDataset(train_dir)
    valid_dataset = deepchem.data.DiskDataset(valid_dir)
    test_dataset = deepchem.data.DiskDataset(test_dir)
  else:
    logger.info("Featurizing datasets")
    train_dataset, valid_dataset, test_dataset = \
        gen_kaggle(KAGGLE_tasks, train_dir, valid_dir, test_dir, data_dir,
                   shard_size=shard_size)

  transformers = get_transformers(train_dataset)
  return KAGGLE_tasks, (train_dataset, valid_dataset,
                        test_dataset), transformers
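

if __name__ == "__main__":
  # Minimal usage sketch, assuming network access and a few GB of free disk
  # for the initial download and featurization. Subsequent runs reload the
  # cached DiskDatasets from disk.
  logging.basicConfig(level=logging.INFO)
  tasks, (train, valid, test), transformers = load_kaggle(shard_size=2000)
  logger.info("Loaded %d tasks; train/valid/test sizes: %d/%d/%d" %
              (len(tasks), len(train), len(valid), len(test)))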