From 2c23fc6d9ccbfe543d85664163e96f195e1da114 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Mon, 1 Jun 2020 18:19:59 -0700 Subject: [PATCH 01/23] Changes --- deepchem/hyper/__init__.py | 3 +- deepchem/hyper/base_classes.py | 47 +++++++++++ deepchem/hyper/gaussian_process.py | 53 ++++++------- deepchem/hyper/grid_search.py | 77 ++++++++++++------- .../tests/test_gaussian_hyperparam_opt.py | 42 ++++++++++ deepchem/hyper/tests/test_hyperparam_opt.py | 35 ++++++--- docs/hyper.rst | 11 ++- examples/hyperparam_opt/README.md | 4 + 8 files changed, 204 insertions(+), 68 deletions(-) create mode 100644 deepchem/hyper/base_classes.py create mode 100644 deepchem/hyper/tests/test_gaussian_hyperparam_opt.py create mode 100644 examples/hyperparam_opt/README.md diff --git a/deepchem/hyper/__init__.py b/deepchem/hyper/__init__.py index c383293143..29adcf5605 100644 --- a/deepchem/hyper/__init__.py +++ b/deepchem/hyper/__init__.py @@ -1,2 +1,3 @@ -from deepchem.hyper.grid_search import HyperparamOpt +from deepchem.hyper.base_classes import HyperparamOpt +from deepchem.hyper.grid_search import GridHyperparamOpt from deepchem.hyper.gaussian_process import GaussianProcessHyperparamOpt diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py new file mode 100644 index 0000000000..d2480f03e4 --- /dev/null +++ b/deepchem/hyper/base_classes.py @@ -0,0 +1,47 @@ +class HyperparamOpt(object): + """Abstract superclass for hyperparameter search classes. + + This class is an abstract base class for hyperparameter search + classes in DeepChem. Hyperparameter search is performed on + `dc.model.Model` classes. Each hyperparameter object accepts a + `dc.models.Model` class upon construct. When the `hyperparam_search` + class is invoked, this class is used to construct many different + concrete models which are trained on the specified training set and + evaluated on a given validation set. + + Different subclasses of `HyperparamOpt` differ in the choice of + strategy for searching the hyperparameter evaluation space. This + class itself is an abstract superclass and should never be directly + instantiated. + """ + + def __init__(self, model_class): + """Initialize Hyperparameter Optimizer. + + Note this is an abstract constructor which should only be used by + subclasses. + + Example + ------- + This example shows the type of constructor function expected. + + >>> import sklearn + >>> import deepchem as dc + >>> def rf_model_builder(model_params, model_dir): + sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + Parameters + ---------- + model_class: constructor function. + This parameter must be constructor function which returns an + object which is an instance of `dc.model.Model`. This function + must accept two arguments, `model_params` of type `dict` and + `model_dir`, a string specifying a path to a model directory. + See the example. + """ + if self.__class__.__name__ == "HyperparamOpt": + raise ValueError( + "HyperparamOpt is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead." 
+ ) + self.model_class = model_class diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 251869fb37..1eb5e5d759 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -5,7 +5,8 @@ import numpy as np import tempfile import os -from deepchem.hyper.grid_search import HyperparamOpt +import deepchem +from deepchem.hyper.base_classes import HyperparamOpt from deepchem.utils.evaluate import Evaluator from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression @@ -39,10 +40,10 @@ def hyperparam_search( log_file='GPhypersearch.log'): """Perform hyperparams search using a gaussian process assumption - params_dict include single-valued parameters being optimized, - which should only contain int, float and list of int(float) - - parameters with names in hp_invalid_list will not be changed. + `params_dict` should map names of parameters being optimized to a + list of parameter values, which should only contain int, float and + list of int(float). Parameters with names in hp_invalid_list will + not be changed/ For Molnet models, self.model_class is model name in string, params_dict = dc.molnet.preset_hyper_parameters.hps[self.model_class] @@ -52,29 +53,30 @@ def hyperparam_search( params_dict: dict dict including parameters and their initial values parameters not suitable for optimization can be added to hp_invalid_list - train_dataset: dc.data.Dataset struct + train_dataset: `dc.data.Dataset` dataset used for training - valid_dataset: dc.data.Dataset struct + valid_dataset: `dc.data.Dataset` dataset used for validation(optimization on valid scores) - output_transformers: list of dc.trans.Transformer + output_transformers: list[dc.trans.Transformer] transformers for evaluation - metric: list of dc.metrics.Metric + metric: `dc.metrics.Metric` metric used for evaluation - direction: bool + direction: bool, (default True) maximization(True) or minimization(False) - n_features: int + n_features: int, (default 1024) number of input features - n_tasks: int + n_tasks: int, (default 1) number of tasks - max_iter: int + max_iter: int, (default 20) number of optimization trials - search_range: int(float) + search_range: int(float) (default 4) optimization on [initial values / search_range, initial values * search_range] - hp_invalid_list: list + hp_invalid_list: list, (default `['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']`) names of parameters that should not be optimized logfile: string - name of log file, hyperparameters and results for each trial will be recorded + name of log file, hyperparameters and results for each trial + will be recorded Returns ------- @@ -82,10 +84,7 @@ def hyperparam_search( params_dict with all optimized values valid_performance_opt: float best performance on valid dataset - """ - - assert len(metric) == 1, 'Only use one metric' hyper_parameters = params_dict hp_list = list(hyper_parameters.keys()) for hp in hp_invalid_list: @@ -136,7 +135,7 @@ def hyperparam_search( param_name = ['l' + format(i, '02d') for i in range(20)] param = dict(zip(param_name[:n_param], param_range)) - data_dir = os.environ['DEEPCHEM_DATA_DIR'] + data_dir = deepchem.utils.get_data_dir() log_file = os.path.join(data_dir, log_file) def f(l00=0, @@ -186,7 +185,7 @@ def f(l00=0, 
float(args[param_name[j]]) for j in range(i, i + hp[1]) ] if param_range[i][0] == 'int': - hyper_parameters[hp[0]] = map(int, hyper_parameters[hp[0]]) + hyper_parameters[hp[0]] = list(map(int, hyper_parameters[hp[0]])) i = i + hp[1] logger.info(hyper_parameters) @@ -195,8 +194,7 @@ def f(l00=0, # Record hyperparameters f.write(str(hyper_parameters)) f.write('\n') - if isinstance(self.model_class, str) or isinstance( - self.model_class, unicode): + if isinstance(self.model_class, str): try: train_scores, valid_scores, _ = benchmark_classification( train_dataset, @@ -224,8 +222,8 @@ def f(l00=0, model.fit(train_dataset, **hyper_parameters) model.save() evaluator = Evaluator(model, valid_dataset, output_transformers) - multitask_scores = evaluator.compute_model_performance(metric) - score = multitask_scores[metric[0].name] + multitask_scores = evaluator.compute_model_performance([metric]) + score = multitask_scores[metric.name] with open(log_file, 'a') as f: # Record performances @@ -262,7 +260,7 @@ def f(l00=0, float(hp_opt[param_name[j]]) for j in range(i, i + hp[1]) ] if param_range[i][0] == 'int': - hyper_parameters[hp[0]] = map(int, hyper_parameters[hp[0]]) + hyper_parameters[hp[0]] = list(map(int, hyper_parameters[hp[0]])) i = i + hp[1] # Compare best model to default hyperparameters @@ -270,8 +268,7 @@ def f(l00=0, # Record hyperparameters f.write(str(params_dict)) f.write('\n') - if isinstance(self.model_class, str) or isinstance(self.model_class, - unicode): + if isinstance(self.model_class, str): try: train_scores, valid_scores, _ = benchmark_classification( train_dataset, diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index 59a0d1ffab..2ff782f516 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- """ Contains basic hyperparameter optimizations. """ @@ -9,23 +7,23 @@ import tempfile import shutil import collections +import logging from functools import reduce from operator import mul from deepchem.utils.evaluate import Evaluator -from deepchem.utils.save import log +from deepchem.hyper.base_classes import HyperparamOpt -class HyperparamOpt(object): - """ - Provides simple hyperparameter search capabilities. +class GridHyperparamOpt(HyperparamOpt): """ + Provides simple grid hyperparameter search capabilities. - def __init__(self, model_class, verbose=True): - self.model_class = model_class - self.verbose = verbose + This class performs a grid hyperparameter search over the specified + hyperparameter space. This implementation is simple and simply does + a direct iteration over all possible hyperparameters and doesn't use + parallelization to speed up the search. + """ - # TODO(rbharath): This function is complicated and monolithic. Is there a nice - # way to refactor this? def hyperparam_search(self, params_dict, train_dataset, @@ -36,10 +34,35 @@ def hyperparam_search(self, logdir=None): """Perform hyperparams search according to params_dict. - Each key to hyperparams_dict is a model_param. The values should be a list - of potential values for that hyperparam. + Each key to hyperparams_dict is a model_param. The values should + be a list of potential values for that hyperparam. + + Parameters + ---------- + params_dict: dict + dict including parameters and their initial values. 
+ train_dataset: `dc.data.Dataset` + dataset used for training + valid_dataset: `dc.data.Dataset` + dataset used for validation(optimization on valid scores) + output_transformers: list of dc.trans.Transformer + transformers for evaluation + metric: dc.metrics.Metric + metric used for evaluation + use_max: bool, optional + If True, return the model with the highest score. Else return + model with the minimum score. + logdir: str, optional + The directory in which to store created models. If not set, will + use a temporary directory. - TODO(rbharath): This shouldn't be stored in a temporary directory. + Returns + ------- + `(best_model, best_hyperparams, all_scores)` where `best_model` is + an instance of `dc.model.Models`, `best_hyperparams` is a + dictionary of parameters, and `all_scores` is a dictionary mapping + string representations of hyperparameter sets to validation + scores. """ hyperparams = params_dict.keys() hyperparam_vals = params_dict.values() @@ -58,20 +81,19 @@ def hyperparam_search(self, for ind, hyperparameter_tuple in enumerate( itertools.product(*hyperparam_vals)): model_params = {} - log("Fitting model %d/%d" % (ind + 1, number_combinations), self.verbose) + logger.info("Fitting model %d/%d" % (ind + 1, number_combinations)) for hyperparam, hyperparam_val in zip(hyperparams, hyperparameter_tuple): model_params[hyperparam] = hyperparam_val - log("hyperparameters: %s" % str(model_params), self.verbose) + logger.info("hyperparameters: %s" % str(model_params)) if logdir is not None: model_dir = os.path.join(logdir, str(ind)) - log("model_dir is %s" % model_dir, self.verbose) + logger.info("model_dir is %s" % model_dir) try: os.makedirs(model_dir) except OSError: if not os.path.isdir(model_dir): - log("Error creating model_dir, using tempfile directory", - self.verbose) + logger.info("Error creating model_dir, using tempfile directory") model_dir = tempfile.mkdtemp() else: model_dir = tempfile.mkdtemp() @@ -95,21 +117,18 @@ def hyperparam_search(self, else: shutil.rmtree(model_dir) - log( - "Model %d/%d, Metric %s, Validation set %s: %f" % - (ind + 1, number_combinations, metric.name, ind, valid_score), - self.verbose) - log("\tbest_validation_score so far: %f" % best_validation_score, - self.verbose) + logger.info("Model %d/%d, Metric %s, Validation set %s: %f" % + (ind + 1, number_combinations, metric.name, ind, valid_score)) + logger.info("\tbest_validation_score so far: %f" % best_validation_score) if best_model is None: - log("No models trained correctly.", self.verbose) + logger.info("No models trained correctly.") # arbitrarily return last model best_model, best_hyperparams = model, hyperparameter_tuple return best_model, best_hyperparams, all_scores train_evaluator = Evaluator(best_model, train_dataset, output_transformers) multitask_scores = train_evaluator.compute_model_performance([metric]) train_score = multitask_scores[metric.name] - log("Best hyperparameters: %s" % str(best_hyperparams), self.verbose) - log("train_score: %f" % train_score, self.verbose) - log("validation_score: %f" % best_validation_score, self.verbose) + logger.info("Best hyperparameters: %s" % str(best_hyperparams)) + logger.info("train_score: %f" % train_score) + logger.info("validation_score: %f" % best_validation_score) return best_model, best_hyperparams, all_scores diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py new file mode 100644 index 0000000000..a0a3fda39f --- /dev/null +++ 
b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -0,0 +1,42 @@ +""" +Tests for Gaussian Process Hyperparameter Optimization. +""" +import numpy as np +import sklearn +import deepchem as dc +import unittest + + +class TestGaussianHyperparamOpt(unittest.TestCase): + """ + Test Gaussian Hyperparameter Optimization. + """ + + def test_rf_example(self): + + def rf_model_builder(model_params, model_dir): + sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + train_dataset = dc.data.NumpyDataset( + X=np.random.rand(50, 5), y=np.random.rand(50, 1)) + valid_dataset = dc.data.NumpyDataset( + X=np.random.rand(20, 5), y=np.random.rand(20, 1)) + optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) + params_dict = {"n_estimators": 40} + transformers = [ + dc.trans.NormalizationTransformer( + transform_y=True, dataset=train_dataset) + ] + metric = dc.metrics.Metric(dc.metrics.r2_score) + + best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, train_dataset, valid_dataset, transformers, metric) + + ######################################## + print("best_hyperparams") + print(best_hyperparams) + print("all_results") + print(all_results) + assert 0 == 1 + ######################################## diff --git a/deepchem/hyper/tests/test_hyperparam_opt.py b/deepchem/hyper/tests/test_hyperparam_opt.py index 41b4e2ac76..22d300e68d 100644 --- a/deepchem/hyper/tests/test_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_hyperparam_opt.py @@ -1,10 +1,6 @@ """ -Integration tests for hyperparam optimization. +Tests for hyperparam optimization. """ -__author__ = "Bharath Ramsundar" -__copyright__ = "Copyright 2016, Stanford University" -__license__ = "MIT" - import os import unittest import tempfile @@ -12,13 +8,34 @@ import numpy as np import tensorflow as tf import deepchem as dc +import sklearn from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor class TestHyperparamOpt(unittest.TestCase): """ - Test hyperparameter optimization API. + Test abstract superclass behavior. + """ + + def test_cant_be_initialized(self): + """Test HyperparamOpt can't be initialized.""" + initialized = True + + def rf_model_builder(model_params, model_dir): + sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) + return dc.model.SklearnModel(sklearn_model, model_dir) + + try: + opt = dc.hyper.HyperparamOpt(rf_model_builder) + except: + initialized = False + assert not initialized + + +class TestGridHyperparamOpt(unittest.TestCase): + """ + Test grid hyperparameter optimization API. 
""" def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): @@ -50,7 +67,7 @@ def rf_model_builder(model_params, model_dir): sklearn_model = RandomForestRegressor(**model_params) return dc.models.SklearnModel(sklearn_model, model_dir) - optimizer = dc.hyper.HyperparamOpt(rf_model_builder) + optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, @@ -102,7 +119,7 @@ def model_builder(model_dir): return dc.models.SingletaskToMultitask(tasks, model_builder, model_dir) - optimizer = dc.hyper.HyperparamOpt(multitask_model_builder) + optimizer = dc.hyper.GridHyperparamOpt(multitask_model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, @@ -144,7 +161,7 @@ def model_builder(model_params, model_dir): return dc.models.MultitaskClassifier( len(tasks), n_features, model_dir=model_dir, **model_params) - optimizer = dc.hyper.HyperparamOpt(model_builder) + optimizer = dc.hyper.GridHyperparamOpt(model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, diff --git a/docs/hyper.rst b/docs/hyper.rst index e21eb120dc..8d4de51d83 100644 --- a/docs/hyper.rst +++ b/docs/hyper.rst @@ -8,6 +8,13 @@ learning algorithm used for the rest of learning and have to be set in an alternate fashion. The :code:`dc.hyper` module contains utilities for hyperparameter tuning. +Hyperparameter Optimization API +------------------------------- + +.. autoclass:: deepchem.hyper.HyperparamOpt + :members: + :special-members: + Grid Hyperparameter Optimization -------------------------------- @@ -15,13 +22,15 @@ This is the simplest form of hyperparameter optimization that simply involves iterating over a fixed grid of possible values for hyperaparameters. -.. autoclass:: deepchem.hyper.HyperparamOpt +.. autoclass:: deepchem.hyper.GridHyperparamOpt :members: + :special-members: Gaussian Process Hyperparameter Optimization -------------------------------------------- .. autoclass:: deepchem.hyper.GaussianProcessHyperparamOpt :members: + :special-members: diff --git a/examples/hyperparam_opt/README.md b/examples/hyperparam_opt/README.md new file mode 100644 index 0000000000..c3a5b6b654 --- /dev/null +++ b/examples/hyperparam_opt/README.md @@ -0,0 +1,4 @@ +# Hyperparameter Optimization + +In this folder we provide examples of performing hyperparameter optimization +with DeepChem. 
From c872eaf5c276f01e6b7492c28404cf07a43776ab Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Mon, 1 Jun 2020 18:23:00 -0700 Subject: [PATCH 02/23] Added example file --- .../hyperparam_opt/gaussian_hyperparam_opt.py | 23 +++++++++++++++++++ .../hyperparam_opt/grid_hyperparam_opt.py | 0 2 files changed, 23 insertions(+) create mode 100644 examples/hyperparam_opt/gaussian_hyperparam_opt.py create mode 100644 examples/hyperparam_opt/grid_hyperparam_opt.py diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py new file mode 100644 index 0000000000..264ad7de86 --- /dev/null +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -0,0 +1,23 @@ +import numpy as np +np.random.seed(123) +import tensorflow as tf +tf.random.set_seed(123) +import deepchem as dc + +# Load delaney dataset +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +train, valid, test= delaney_datasets + +# Fit models +regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + +# TODO(rbharath): I don't like this awkward string/class divide. Maybe clean up? +optimizer = dc.hyper.GaussianProcessHyperparamOpt('tf_regression') +best_hyper_params, best_performance = optimizer.hyperparam_search( + dc.molnet.preset_hyper_parameters.hps['tf_regression'], + train, + valid, + transformers, + [regression_metric] +) diff --git a/examples/hyperparam_opt/grid_hyperparam_opt.py b/examples/hyperparam_opt/grid_hyperparam_opt.py new file mode 100644 index 0000000000..e69de29bb2 From cda5a592a2e2d6465feee3b2bed1e34447b20c11 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Mon, 1 Jun 2020 18:23:19 -0700 Subject: [PATCH 03/23] Changes --- examples/hyperparam_opt/gaussian_hyperparam_opt.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py index 264ad7de86..4dfdc25906 100644 --- a/examples/hyperparam_opt/gaussian_hyperparam_opt.py +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -6,18 +6,13 @@ # Load delaney dataset delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() -train, valid, test= delaney_datasets +train, valid, test = delaney_datasets # Fit models regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - # TODO(rbharath): I don't like this awkward string/class divide. Maybe clean up? optimizer = dc.hyper.GaussianProcessHyperparamOpt('tf_regression') best_hyper_params, best_performance = optimizer.hyperparam_search( - dc.molnet.preset_hyper_parameters.hps['tf_regression'], - train, - valid, - transformers, - [regression_metric] -) + dc.molnet.preset_hyper_parameters.hps['tf_regression'], train, valid, + transformers, [regression_metric]) From ec91d4984ec5613c2736564d04783c89738e8ee9 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Wed, 17 Jun 2020 13:54:48 -0700 Subject: [PATCH 04/23] Shuffling --- .../hyper/tests/test_grid_hyperparam_opt.py | 152 ++++++++++++++++++ deepchem/hyper/tests/test_hyperparam_opt.py | 148 +---------------- 2 files changed, 154 insertions(+), 146 deletions(-) create mode 100644 deepchem/hyper/tests/test_grid_hyperparam_opt.py diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py new file mode 100644 index 0000000000..845a94456d --- /dev/null +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -0,0 +1,152 @@ +""" +Tests for Grid hyperparam optimization. 
+""" +import os +import unittest +import tempfile +import shutil +import numpy as np +import tensorflow as tf +import deepchem as dc +import sklearn +from sklearn.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestRegressor + + + +class TestGridHyperparamOpt(unittest.TestCase): + """ + Test grid hyperparameter optimization API. + """ + + def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): + """Test of hyperparam_opt with singletask RF ECFP regression API.""" + featurizer = dc.feat.CircularFingerprint(size=1024) + tasks = ["log-solubility"] + current_dir = os.path.dirname(os.path.abspath(__file__)) + input_file = os.path.join(current_dir, "../../models/tests/example.csv") + loader = dc.data.CSVLoader( + tasks=tasks, smiles_field="smiles", featurizer=featurizer) + dataset = loader.featurize(input_file) + + splitter = dc.splits.ScaffoldSplitter() + train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( + dataset) + + transformers = [ + dc.trans.NormalizationTransformer( + transform_y=True, dataset=train_dataset) + ] + for dataset in [train_dataset, test_dataset]: + for transformer in transformers: + dataset = transformer.transform(dataset) + + params_dict = {"n_estimators": [10, 100]} + metric = dc.metrics.Metric(dc.metrics.r2_score) + + def rf_model_builder(model_params, model_dir): + sklearn_model = RandomForestRegressor(**model_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + logdir=None) + + def test_singletask_to_multitask_sklearn_hyperparam_opt(self): + """Test of hyperparam_opt with singletask_to_multitask.""" + tasks = [ + "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", + "task8", "task9", "task10", "task11", "task12", "task13", "task14", + "task15", "task16" + ] + input_file = "multitask_example.csv" + + n_features = 10 + n_tasks = len(tasks) + # Define train dataset + n_train = 100 + X_train = np.random.rand(n_train, n_features) + y_train = np.random.randint(2, size=(n_train, n_tasks)) + w_train = np.ones_like(y_train) + ids_train = ["C"] * n_train + + train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train, w_train, + ids_train, tasks) + + # Define validation dataset + n_valid = 10 + X_valid = np.random.rand(n_valid, n_features) + y_valid = np.random.randint(2, size=(n_valid, n_tasks)) + w_valid = np.ones_like(y_valid) + ids_valid = ["C"] * n_valid + valid_dataset = dc.data.DiskDataset.from_numpy(X_valid, y_valid, w_valid, + ids_valid, tasks) + + transformers = [] + classification_metric = dc.metrics.Metric( + dc.metrics.matthews_corrcoef, np.mean, mode="classification") + params_dict = {"n_estimators": [1, 10]} + + def multitask_model_builder(model_params, model_dir): + + def model_builder(model_dir): + sklearn_model = RandomForestClassifier(**model_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + return dc.models.SingletaskToMultitask(tasks, model_builder, model_dir) + + optimizer = dc.hyper.GridHyperparamOpt(multitask_model_builder) + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + classification_metric, + logdir=None) + + def test_multitask_tf_mlp_ECFP_classification_hyperparam_opt(self): + """Straightforward test of Tensorflow multitask 
deepchem classification API.""" + task_type = "classification" + + current_dir = os.path.dirname(os.path.abspath(__file__)) + input_file = os.path.join(current_dir, + "../../models/tests/multitask_example.csv") + tasks = [ + "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", + "task8", "task9", "task10", "task11", "task12", "task13", "task14", + "task15", "task16" + ] + + n_features = 1024 + featurizer = dc.feat.CircularFingerprint(size=n_features) + + loader = dc.data.CSVLoader( + tasks=tasks, smiles_field="smiles", featurizer=featurizer) + dataset = loader.featurize(input_file) + + splitter = dc.splits.ScaffoldSplitter() + train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( + dataset) + + transformers = [] + metric = dc.metrics.Metric( + dc.metrics.roc_auc_score, np.mean, mode="classification") + params_dict = {"layer_sizes": [(10,), (100,)]} + + def model_builder(model_params, model_dir): + return dc.models.MultitaskClassifier( + len(tasks), n_features, model_dir=model_dir, **model_params) + + optimizer = dc.hyper.GridHyperparamOpt(model_builder) + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + logdir=None) diff --git a/deepchem/hyper/tests/test_hyperparam_opt.py b/deepchem/hyper/tests/test_hyperparam_opt.py index 22d300e68d..1507133a9a 100644 --- a/deepchem/hyper/tests/test_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_hyperparam_opt.py @@ -1,17 +1,10 @@ """ Tests for hyperparam optimization. """ -import os + import unittest -import tempfile -import shutil -import numpy as np -import tensorflow as tf -import deepchem as dc import sklearn -from sklearn.ensemble import RandomForestClassifier -from sklearn.ensemble import RandomForestRegressor - +import deepchem as dc class TestHyperparamOpt(unittest.TestCase): """ @@ -32,140 +25,3 @@ def rf_model_builder(model_params, model_dir): initialized = False assert not initialized - -class TestGridHyperparamOpt(unittest.TestCase): - """ - Test grid hyperparameter optimization API. 
- """ - - def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): - """Test of hyperparam_opt with singletask RF ECFP regression API.""" - featurizer = dc.feat.CircularFingerprint(size=1024) - tasks = ["log-solubility"] - current_dir = os.path.dirname(os.path.abspath(__file__)) - input_file = os.path.join(current_dir, "../../models/tests/example.csv") - loader = dc.data.CSVLoader( - tasks=tasks, smiles_field="smiles", featurizer=featurizer) - dataset = loader.featurize(input_file) - - splitter = dc.splits.ScaffoldSplitter() - train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( - dataset) - - transformers = [ - dc.trans.NormalizationTransformer( - transform_y=True, dataset=train_dataset) - ] - for dataset in [train_dataset, test_dataset]: - for transformer in transformers: - dataset = transformer.transform(dataset) - - params_dict = {"n_estimators": [10, 100]} - metric = dc.metrics.Metric(dc.metrics.r2_score) - - def rf_model_builder(model_params, model_dir): - sklearn_model = RandomForestRegressor(**model_params) - return dc.models.SklearnModel(sklearn_model, model_dir) - - optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric, - logdir=None) - - def test_singletask_to_multitask_sklearn_hyperparam_opt(self): - """Test of hyperparam_opt with singletask_to_multitask.""" - tasks = [ - "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", - "task8", "task9", "task10", "task11", "task12", "task13", "task14", - "task15", "task16" - ] - input_file = "multitask_example.csv" - - n_features = 10 - n_tasks = len(tasks) - # Define train dataset - n_train = 100 - X_train = np.random.rand(n_train, n_features) - y_train = np.random.randint(2, size=(n_train, n_tasks)) - w_train = np.ones_like(y_train) - ids_train = ["C"] * n_train - - train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train, w_train, - ids_train, tasks) - - # Define validation dataset - n_valid = 10 - X_valid = np.random.rand(n_valid, n_features) - y_valid = np.random.randint(2, size=(n_valid, n_tasks)) - w_valid = np.ones_like(y_valid) - ids_valid = ["C"] * n_valid - valid_dataset = dc.data.DiskDataset.from_numpy(X_valid, y_valid, w_valid, - ids_valid, tasks) - - transformers = [] - classification_metric = dc.metrics.Metric( - dc.metrics.matthews_corrcoef, np.mean, mode="classification") - params_dict = {"n_estimators": [1, 10]} - - def multitask_model_builder(model_params, model_dir): - - def model_builder(model_dir): - sklearn_model = RandomForestClassifier(**model_params) - return dc.models.SklearnModel(sklearn_model, model_dir) - - return dc.models.SingletaskToMultitask(tasks, model_builder, model_dir) - - optimizer = dc.hyper.GridHyperparamOpt(multitask_model_builder) - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - classification_metric, - logdir=None) - - def test_multitask_tf_mlp_ECFP_classification_hyperparam_opt(self): - """Straightforward test of Tensorflow multitask deepchem classification API.""" - task_type = "classification" - - current_dir = os.path.dirname(os.path.abspath(__file__)) - input_file = os.path.join(current_dir, - "../../models/tests/multitask_example.csv") - tasks = [ - "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", - "task8", "task9", "task10", "task11", "task12", "task13", 
"task14", - "task15", "task16" - ] - - n_features = 1024 - featurizer = dc.feat.CircularFingerprint(size=n_features) - - loader = dc.data.CSVLoader( - tasks=tasks, smiles_field="smiles", featurizer=featurizer) - dataset = loader.featurize(input_file) - - splitter = dc.splits.ScaffoldSplitter() - train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( - dataset) - - transformers = [] - metric = dc.metrics.Metric( - dc.metrics.roc_auc_score, np.mean, mode="classification") - params_dict = {"layer_sizes": [(10,), (100,)]} - - def model_builder(model_params, model_dir): - return dc.models.MultitaskClassifier( - len(tasks), n_features, model_dir=model_dir, **model_params) - - optimizer = dc.hyper.GridHyperparamOpt(model_builder) - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric, - logdir=None) From 20b0a55b5e53fb7cfba057f98d540e84c5a6382e Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Wed, 17 Jun 2020 19:00:42 -0700 Subject: [PATCH 05/23] Refactoring gaussian process optimizer. --- deepchem/hyper/base_classes.py | 123 ++++++++++++++- deepchem/hyper/gaussian_process.py | 140 +++++++++--------- deepchem/hyper/grid_search.py | 7 +- .../tests/test_gaussian_hyperparam_opt.py | 18 +-- .../hyper/tests/test_grid_hyperparam_opt.py | 3 +- 5 files changed, 201 insertions(+), 90 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index d2480f03e4..9ad7069c3c 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -1,3 +1,71 @@ + +def compute_parameter_search_space(params_dict, search_range): + """Convenience Function to compute parameter search space. + + Parameters + ---------- + params_dict: dict + Dictionary mapping strings to Ints/Floats/Lists. For those + parameters in which int/float is specified, an explicit list of + parameters is computed with `search_range`. Parameters in `hp_invalid_list` + search_range: int(float) (default 4) + For int/float values in `params_dict`, computes optimization range + on `[initial values / search_range, initial values * + search_range]` + + Returns + ------- + expanded_params: dict + Expanded dictionary of parameters where all int/float values in + `params_dict` are expanded out into explicit search ranges. + """ + hyper_parameters = params_dict + hp_list = list(hyper_parameters.keys()) + + for hp in hp_invalid_list: + if hp in hp_list: + hp_list.remove(hp) + + hp_list_class = [hyper_parameters[hp].__class__ for hp in hp_list] + # Check the type is correct + if not (set(hp_list_class) <= set([list, int, float])): + raise ValueError("params_dict must contain values that are lists/ints/floats.") + + # Float or int hyper parameters(ex. batch_size, learning_rate) + hp_list_single = [ + hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list + ] + + # List of float or int hyper parameters(ex. 
layer_sizes) + hp_list_multiple = [(hp_list[i], len(hyper_parameters[hp_list[i]])) + for i in range(len(hp_list)) + if hp_list_class[i] is list] + + # Range of optimization + param_range = [] + for hp in hp_list_single: + if hyper_parameters[hp].__class__ is int: + param_range.append((('int'), [ + hyper_parameters[hp] // search_range, + hyper_parameters[hp] * search_range + ])) + else: + param_range.append((('cont'), [ + hyper_parameters[hp] / search_range, + hyper_parameters[hp] * search_range + ])) + for hp in hp_list_multiple: + if hyper_parameters[hp[0]][0].__class__ is int: + param_range.extend([(('int'), [ + hyper_parameters[hp[0]][i] // search_range, + hyper_parameters[hp[0]][i] * search_range + ]) for i in range(hp[1])]) + else: + param_range.extend([(('cont'), [ + hyper_parameters[hp[0]][i] / search_range, + hyper_parameters[hp[0]][i] * search_range + ]) for i in range(hp[1])]) + class HyperparamOpt(object): """Abstract superclass for hyperparameter search classes. @@ -13,9 +81,14 @@ class is invoked, this class is used to construct many different strategy for searching the hyperparameter evaluation space. This class itself is an abstract superclass and should never be directly instantiated. + + Objects of this class maintains a list of constants, + `hp_invalid_list` that contains a list of model parameters which + cannot be optimized over This list is used to catch user errors. You + can customize this list in the constructor. """ - def __init__(self, model_class): + def __init__(self, model_class, hp_invalid_list=['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']): """Initialize Hyperparameter Optimizer. Note this is an abstract constructor which should only be used by @@ -39,9 +112,57 @@ def __init__(self, model_class): must accept two arguments, `model_params` of type `dict` and `model_dir`, a string specifying a path to a model directory. See the example. + hp_invalid_list: list, (default `['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']`) """ if self.__class__.__name__ == "HyperparamOpt": raise ValueError( "HyperparamOpt is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead." ) self.model_class = model_class + self.hp_invalid_list = hp_invalid_list + + def hyperparam_search(self, + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + use_max=True, + logdir=None): + """Conduct Hyperparameter search. + + This method defines the common API shared by all hyperparameter + optimization subclasses. Different classes will implement + different search methods but they must all follow this common API. + + Parameters + ---------- + params_dict: dict + Dictionary mapping strings to Ints/Floats/Lists. For those + parameters in which int/float is specified, an explicit list of + parameters is computed with `search_range`. + train_dataset: `dc.data.Dataset` + dataset used for training + valid_dataset: `dc.data.Dataset` + dataset used for validation(optimization on valid scores) + output_transformers: list[dc.trans.Transformer] + Transformers for evaluation. 
This argument is needed since + `train_dataset` and `valid_dataset` may have been transformed + for learning and need the transform to be inverted before + the metric can be evaluated on a model. + use_max: bool, optional + If True, return the model with the highest score. Else return + model with the minimum score. + logdir: str, optional + The directory in which to store created models. If not set, will + use a temporary directory. + + Returns + ------- + `(best_model, best_hyperparams, all_scores)` where `best_model` is + an instance of `dc.model.Models`, `best_hyperparams` is a + dictionary of parameters, and `all_scores` is a dictionary mapping + string representations of hyperparameter sets to validation + scores. + """ + raise NotImplementedError diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 1eb5e5d759..366127ca56 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -16,6 +16,13 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): """ Gaussian Process Global Optimization(GPGO) + + This class uses Gaussian Process optimization to select + hyperparameters. Note that this class can only optimize 20 + parameters at a time. + + TODO: This class is too tied up with the MoleculeNet benchmarking. + This needs to be refactored out cleanly. """ def hyperparam_search( @@ -23,46 +30,34 @@ def hyperparam_search( params_dict, train_dataset, valid_dataset, - output_transformers, + transformers, metric, - direction=True, + use_max=True, + logdir=None, n_features=1024, n_tasks=1, max_iter=20, search_range=4, - hp_invalid_list=[ - 'seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', - 'n_pair_feat', 'fit_transformers', 'min_child_weight', - 'max_delta_step', 'subsample', 'colsample_bylevel', - 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', - 'base_score' - ], log_file='GPhypersearch.log'): - """Perform hyperparams search using a gaussian process assumption - - `params_dict` should map names of parameters being optimized to a - list of parameter values, which should only contain int, float and - list of int(float). Parameters with names in hp_invalid_list will - not be changed/ - - For Molnet models, self.model_class is model name in string, - params_dict = dc.molnet.preset_hyper_parameters.hps[self.model_class] + """Perform hyperparameter search using a gaussian process. Parameters ---------- params_dict: dict dict including parameters and their initial values - parameters not suitable for optimization can be added to hp_invalid_list train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` dataset used for validation(optimization on valid scores) - output_transformers: list[dc.trans.Transformer] + transformers: list[dc.trans.Transformer] transformers for evaluation metric: `dc.metrics.Metric` metric used for evaluation - direction: bool, (default True) + use_max: bool, (default True) maximization(True) or minimization(False) + logdir: str, optional + The directory in which to store created models. If not set, will + use a temporary directory. 
n_features: int, (default 1024) number of input features n_tasks: int, (default 1) @@ -72,7 +67,6 @@ def hyperparam_search( search_range: int(float) (default 4) optimization on [initial values / search_range, initial values * search_range] - hp_invalid_list: list, (default `['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']`) names of parameters that should not be optimized logfile: string name of log file, hyperparameters and results for each trial @@ -80,56 +74,55 @@ def hyperparam_search( Returns ------- - hyper_parameters: dict - params_dict with all optimized values - valid_performance_opt: float - best performance on valid dataset + `(best_model, best_hyperparams, all_scores)` where `best_model` is + an instance of `dc.model.Models`, `best_hyperparams` is a + dictionary of parameters, and `all_scores` is a dictionary mapping + string representations of hyperparameter sets to validation + scores. """ - hyper_parameters = params_dict - hp_list = list(hyper_parameters.keys()) - for hp in hp_invalid_list: - if hp in hp_list: - hp_list.remove(hp) - - hp_list_class = [hyper_parameters[hp].__class__ for hp in hp_list] - assert set(hp_list_class) <= set([list, int, float]) - # Float or int hyper parameters(ex. batch_size, learning_rate) - hp_list_single = [ - hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list - ] - # List of float or int hyper parameters(ex. layer_sizes) - hp_list_multiple = [(hp_list[i], len(hyper_parameters[hp_list[i]])) - for i in range(len(hp_list)) - if hp_list_class[i] is list] + if len(params_dict) > 20: + raise ValueError("This class can only search over 20 parameters in one invocation.") + #hyper_parameters = params_dict + #hp_list = list(hyper_parameters.keys()) + #hp_list_class = [hyper_parameters[hp].__class__ for hp in hp_list] + #assert set(hp_list_class) <= set([list, int, float]) + ## Float or int hyper parameters(ex. batch_size, learning_rate) + #hp_list_single = [ + # hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list + #] + ## List of float or int hyper parameters(ex. 
layer_sizes) + #hp_list_multiple = [(hp_list[i], len(hyper_parameters[hp_list[i]])) + # for i in range(len(hp_list)) + # if hp_list_class[i] is list] # Number of parameters n_param = len(hp_list_single) if len(hp_list_multiple) > 0: n_param = n_param + sum([hp[1] for hp in hp_list_multiple]) - # Range of optimization - param_range = [] - for hp in hp_list_single: - if hyper_parameters[hp].__class__ is int: - param_range.append((('int'), [ - hyper_parameters[hp] // search_range, - hyper_parameters[hp] * search_range - ])) - else: - param_range.append((('cont'), [ - hyper_parameters[hp] / search_range, - hyper_parameters[hp] * search_range - ])) - for hp in hp_list_multiple: - if hyper_parameters[hp[0]][0].__class__ is int: - param_range.extend([(('int'), [ - hyper_parameters[hp[0]][i] // search_range, - hyper_parameters[hp[0]][i] * search_range - ]) for i in range(hp[1])]) - else: - param_range.extend([(('cont'), [ - hyper_parameters[hp[0]][i] / search_range, - hyper_parameters[hp[0]][i] * search_range - ]) for i in range(hp[1])]) + ## Range of optimization + #param_range = [] + #for hp in hp_list_single: + # if hyper_parameters[hp].__class__ is int: + # param_range.append((('int'), [ + # hyper_parameters[hp] // search_range, + # hyper_parameters[hp] * search_range + # ])) + # else: + # param_range.append((('cont'), [ + # hyper_parameters[hp] / search_range, + # hyper_parameters[hp] * search_range + # ])) + #for hp in hp_list_multiple: + # if hyper_parameters[hp[0]][0].__class__ is int: + # param_range.extend([(('int'), [ + # hyper_parameters[hp[0]][i] // search_range, + # hyper_parameters[hp[0]][i] * search_range + # ]) for i in range(hp[1])]) + # else: + # param_range.extend([(('cont'), [ + # hyper_parameters[hp[0]][i] / search_range, + # hyper_parameters[hp[0]][i] * search_range + # ]) for i in range(hp[1])]) # Dummy names param_name = ['l' + format(i, '02d') for i in range(20)] @@ -159,6 +152,7 @@ def f(l00=0, l18=0, l19=0): """ Optimizing function + Take in hyper parameter values and return valid set performances Parameters @@ -200,7 +194,7 @@ def f(l00=0, train_dataset, valid_dataset, valid_dataset, ['task_placeholder'] * n_tasks, - output_transformers, + transformers, n_features, metric, self.model_class, @@ -210,7 +204,7 @@ def f(l00=0, train_dataset, valid_dataset, valid_dataset, ['task_placeholder'] * n_tasks, - output_transformers, + transformers, n_features, metric, self.model_class, @@ -221,7 +215,7 @@ def f(l00=0, model = self.model_class(hyper_parameters, model_dir) model.fit(train_dataset, **hyper_parameters) model.save() - evaluator = Evaluator(model, valid_dataset, output_transformers) + evaluator = Evaluator(model, valid_dataset, transformers) multitask_scores = evaluator.compute_model_performance([metric]) score = multitask_scores[metric.name] @@ -230,7 +224,7 @@ def f(l00=0, f.write(str(score)) f.write('\n') # GPGO maximize performance by default, set performance to its negative value for minimization - if direction: + if use_max: return score else: return -score @@ -274,7 +268,7 @@ def f(l00=0, train_dataset, valid_dataset, valid_dataset, ['task_placeholder'] * n_tasks, - output_transformers, + transformers, n_features, metric, self.model_class, @@ -284,7 +278,7 @@ def f(l00=0, train_dataset, valid_dataset, valid_dataset, ['task_placeholder'] * n_tasks, - output_transformers, + transformers, n_features, metric, self.model_class, @@ -294,7 +288,7 @@ def f(l00=0, # Record performances f.write(str(score)) f.write('\n') - if not direction: + if not use_max: score = -score 
if score > valid_performance_opt: # Optimized model is better, return hyperparameters diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index 2ff782f516..dcadabac56 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -39,13 +39,14 @@ def hyperparam_search(self, Parameters ---------- - params_dict: dict - dict including parameters and their initial values. + params_dict: Dict[str, list] + Maps hyperparameter names (strings) to lists of possible + parameter values. train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` dataset used for validation(optimization on valid scores) - output_transformers: list of dc.trans.Transformer + output_transformers: list[dc.trans.Transformer] transformers for evaluation metric: dc.metrics.Metric metric used for evaluation diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index a0a3fda39f..1e292e29b1 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -23,20 +23,16 @@ def rf_model_builder(model_params, model_dir): valid_dataset = dc.data.NumpyDataset( X=np.random.rand(20, 5), y=np.random.rand(20, 1)) optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) - params_dict = {"n_estimators": 40} + params_dict = {"n_estimators": [10, 100]} transformers = [ dc.trans.NormalizationTransformer( transform_y=True, dataset=train_dataset) ] metric = dc.metrics.Metric(dc.metrics.r2_score) - best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, train_dataset, valid_dataset, transformers, metric) - - ######################################## - print("best_hyperparams") - print(best_hyperparams) - print("all_results") - print(all_results) - assert 0 == 1 - ######################################## + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric) diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py index 845a94456d..d75f0c7888 100644 --- a/deepchem/hyper/tests/test_grid_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -1,5 +1,5 @@ """ -Tests for Grid hyperparam optimization. +Tests for hyperparam optimization. """ import os import unittest @@ -13,7 +13,6 @@ from sklearn.ensemble import RandomForestRegressor - class TestGridHyperparamOpt(unittest.TestCase): """ Test grid hyperparameter optimization API. From 5314446e36eb7bc87b875f7c05c3a29d7a8f5a09 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Thu, 18 Jun 2020 18:07:39 -0700 Subject: [PATCH 06/23] Changes --- deepchem/hyper/base_classes.py | 45 ++--- deepchem/hyper/gaussian_process.py | 168 +++++------------- deepchem/hyper/grid_search.py | 5 +- .../tests/test_gaussian_hyperparam_opt.py | 2 +- .../hyper/tests/test_grid_hyperparam_opt.py | 19 +- 5 files changed, 82 insertions(+), 157 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 9ad7069c3c..d3e16ac318 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -1,5 +1,5 @@ -def compute_parameter_search_space(params_dict, search_range): +def compute_parameter_range(params_dict, search_range): """Convenience Function to compute parameter search space. 
Parameters @@ -7,7 +7,7 @@ def compute_parameter_search_space(params_dict, search_range): params_dict: dict Dictionary mapping strings to Ints/Floats/Lists. For those parameters in which int/float is specified, an explicit list of - parameters is computed with `search_range`. Parameters in `hp_invalid_list` + parameters is computed with `search_range`. search_range: int(float) (default 4) For int/float values in `params_dict`, computes optimization range on `[initial values / search_range, initial values * @@ -19,14 +19,9 @@ def compute_parameter_search_space(params_dict, search_range): Expanded dictionary of parameters where all int/float values in `params_dict` are expanded out into explicit search ranges. """ - hyper_parameters = params_dict - hp_list = list(hyper_parameters.keys()) + hp_list = list(params_dict.keys()) - for hp in hp_invalid_list: - if hp in hp_list: - hp_list.remove(hp) - - hp_list_class = [hyper_parameters[hp].__class__ for hp in hp_list] + hp_list_class = [params_dict[hp].__class__ for hp in hp_list] # Check the type is correct if not (set(hp_list_class) <= set([list, int, float])): raise ValueError("params_dict must contain values that are lists/ints/floats.") @@ -37,34 +32,35 @@ def compute_parameter_search_space(params_dict, search_range): ] # List of float or int hyper parameters(ex. layer_sizes) - hp_list_multiple = [(hp_list[i], len(hyper_parameters[hp_list[i]])) + hp_list_multiple = [(hp_list[i], len(params_dict[hp_list[i]])) for i in range(len(hp_list)) if hp_list_class[i] is list] # Range of optimization param_range = [] for hp in hp_list_single: - if hyper_parameters[hp].__class__ is int: + if params_dict[hp].__class__ is int: param_range.append((('int'), [ - hyper_parameters[hp] // search_range, - hyper_parameters[hp] * search_range + params_dict[hp] // search_range, + params_dict[hp] * search_range ])) else: param_range.append((('cont'), [ - hyper_parameters[hp] / search_range, - hyper_parameters[hp] * search_range + params_dict[hp] / search_range, + params_dict[hp] * search_range ])) for hp in hp_list_multiple: - if hyper_parameters[hp[0]][0].__class__ is int: + if params_dict[hp[0]][0].__class__ is int: param_range.extend([(('int'), [ - hyper_parameters[hp[0]][i] // search_range, - hyper_parameters[hp[0]][i] * search_range + params_dict[hp[0]][i] // search_range, + params_dict[hp[0]][i] * search_range ]) for i in range(hp[1])]) else: param_range.extend([(('cont'), [ - hyper_parameters[hp[0]][i] / search_range, - hyper_parameters[hp[0]][i] * search_range + params_dict[hp[0]][i] / search_range, + params_dict[hp[0]][i] * search_range ]) for i in range(hp[1])]) + return hp_list_single, hp_list_multiple, param_range class HyperparamOpt(object): """Abstract superclass for hyperparameter search classes. @@ -81,14 +77,9 @@ class is invoked, this class is used to construct many different strategy for searching the hyperparameter evaluation space. This class itself is an abstract superclass and should never be directly instantiated. - - Objects of this class maintains a list of constants, - `hp_invalid_list` that contains a list of model parameters which - cannot be optimized over This list is used to catch user errors. You - can customize this list in the constructor. 
""" - def __init__(self, model_class, hp_invalid_list=['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']): + def __init__(self, model_class): """Initialize Hyperparameter Optimizer. Note this is an abstract constructor which should only be used by @@ -112,14 +103,12 @@ def __init__(self, model_class, hp_invalid_list=['seed', 'nb_epoch', 'penalty_ty must accept two arguments, `model_params` of type `dict` and `model_dir`, a string specifying a path to a model directory. See the example. - hp_invalid_list: list, (default `['seed', 'nb_epoch', 'penalty_type', 'dropouts', 'bypass_dropouts', 'n_pair_feat', 'fit_transformers', 'min_child_weight', 'max_delta_step', 'subsample', 'colsample_bylevel', 'colsample_bytree', 'reg_alpha', 'reg_lambda', 'scale_pos_weight', 'base_score']`) """ if self.__class__.__name__ == "HyperparamOpt": raise ValueError( "HyperparamOpt is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead." ) self.model_class = model_class - self.hp_invalid_list = hp_invalid_list def hyperparam_search(self, params_dict, diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 366127ca56..b3baa3e885 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -6,9 +6,9 @@ import tempfile import os import deepchem +from deepchem.hyper.base_classes import compute_parameter_range from deepchem.hyper.base_classes import HyperparamOpt from deepchem.utils.evaluate import Evaluator -from deepchem.molnet.run_benchmark_models import benchmark_classification, benchmark_regression logger = logging.getLogger(__name__) @@ -34,11 +34,9 @@ def hyperparam_search( metric, use_max=True, logdir=None, - n_features=1024, - n_tasks=1, max_iter=20, search_range=4, - log_file='GPhypersearch.log'): + logfile=None): """Perform hyperparameter search using a gaussian process. Parameters @@ -58,19 +56,17 @@ def hyperparam_search( logdir: str, optional The directory in which to store created models. If not set, will use a temporary directory. - n_features: int, (default 1024) - number of input features - n_tasks: int, (default 1) - number of tasks max_iter: int, (default 20) number of optimization trials search_range: int(float) (default 4) optimization on [initial values / search_range, initial values * search_range] names of parameters that should not be optimized - logfile: string - name of log file, hyperparameters and results for each trial - will be recorded + logfile: str + Name of logfile to write results to. If specified, this is must + be a valid file. If not specified, results of hyperparameter + search will be written to `logdir/.txt`. + Returns ------- @@ -82,54 +78,27 @@ def hyperparam_search( """ if len(params_dict) > 20: raise ValueError("This class can only search over 20 parameters in one invocation.") - #hyper_parameters = params_dict - #hp_list = list(hyper_parameters.keys()) - #hp_list_class = [hyper_parameters[hp].__class__ for hp in hp_list] - #assert set(hp_list_class) <= set([list, int, float]) - ## Float or int hyper parameters(ex. batch_size, learning_rate) - #hp_list_single = [ - # hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list - #] - ## List of float or int hyper parameters(ex. 
layer_sizes) - #hp_list_multiple = [(hp_list[i], len(hyper_parameters[hp_list[i]])) - # for i in range(len(hp_list)) - # if hp_list_class[i] is list] + data_dir = deepchem.utils.get_data_dir() + # Specify logfile + if logfile: + log_file = logfile + elif logdir is not None: + log_file = os.path.join(model_dir, log_file) + else: + log_file = None + + hyper_parameters = params_dict + hp_list_single, hp_list_multiple, param_range = compute_parameter_range(params_dict, search_range) # Number of parameters n_param = len(hp_list_single) if len(hp_list_multiple) > 0: n_param = n_param + sum([hp[1] for hp in hp_list_multiple]) - ## Range of optimization - #param_range = [] - #for hp in hp_list_single: - # if hyper_parameters[hp].__class__ is int: - # param_range.append((('int'), [ - # hyper_parameters[hp] // search_range, - # hyper_parameters[hp] * search_range - # ])) - # else: - # param_range.append((('cont'), [ - # hyper_parameters[hp] / search_range, - # hyper_parameters[hp] * search_range - # ])) - #for hp in hp_list_multiple: - # if hyper_parameters[hp[0]][0].__class__ is int: - # param_range.extend([(('int'), [ - # hyper_parameters[hp[0]][i] // search_range, - # hyper_parameters[hp[0]][i] * search_range - # ]) for i in range(hp[1])]) - # else: - # param_range.extend([(('cont'), [ - # hyper_parameters[hp[0]][i] / search_range, - # hyper_parameters[hp[0]][i] * search_range - # ]) for i in range(hp[1])]) # Dummy names param_name = ['l' + format(i, '02d') for i in range(20)] param = dict(zip(param_name[:n_param], param_range)) - data_dir = deepchem.utils.get_data_dir() - log_file = os.path.join(data_dir, log_file) def f(l00=0, l01=0, @@ -183,46 +152,37 @@ def f(l00=0, i = i + hp[1] logger.info(hyper_parameters) - # Run benchmark - with open(log_file, 'a') as f: - # Record hyperparameters - f.write(str(hyper_parameters)) - f.write('\n') - if isinstance(self.model_class, str): + if log_file: + # Run benchmark + with open(log_file, 'a') as f: + # Record hyperparameters + f.write(str(hyper_parameters)) + f.write('\n') + + + if logdir is not None: + model_dir = os.path.join(logdir, str(ind)) + logger.info("model_dir is %s" % model_dir) try: - train_scores, valid_scores, _ = benchmark_classification( - train_dataset, - valid_dataset, - valid_dataset, ['task_placeholder'] * n_tasks, - transformers, - n_features, - metric, - self.model_class, - hyper_parameters=hyper_parameters) - except AssertionError: - train_scores, valid_scores, _ = benchmark_regression( - train_dataset, - valid_dataset, - valid_dataset, ['task_placeholder'] * n_tasks, - transformers, - n_features, - metric, - self.model_class, - hyper_parameters=hyper_parameters) - score = valid_scores[self.model_class][metric[0].name] + os.makedirs(model_dir) + except OSError: + if not os.path.isdir(model_dir): + logger.info("Error creating model_dir, using tempfile directory") + model_dir = tempfile.mkdtemp() else: model_dir = tempfile.mkdtemp() - model = self.model_class(hyper_parameters, model_dir) - model.fit(train_dataset, **hyper_parameters) - model.save() - evaluator = Evaluator(model, valid_dataset, transformers) - multitask_scores = evaluator.compute_model_performance([metric]) - score = multitask_scores[metric.name] - - with open(log_file, 'a') as f: - # Record performances - f.write(str(score)) - f.write('\n') + model = self.model_class(hyper_parameters, model_dir) + model.fit(train_dataset, **hyper_parameters) + model.save() + evaluator = Evaluator(model, valid_dataset, transformers) + multitask_scores = 
evaluator.compute_model_performance([metric]) + score = multitask_scores[metric.name] + + if log_file: + with open(log_file, 'a') as f: + # Record performances + f.write(str(score)) + f.write('\n') # GPGO maximize performance by default, set performance to its negative value for minimization if use_max: return score @@ -258,41 +218,11 @@ def f(l00=0, i = i + hp[1] # Compare best model to default hyperparameters - with open(log_file, 'a') as f: - # Record hyperparameters - f.write(str(params_dict)) - f.write('\n') - if isinstance(self.model_class, str): - try: - train_scores, valid_scores, _ = benchmark_classification( - train_dataset, - valid_dataset, - valid_dataset, ['task_placeholder'] * n_tasks, - transformers, - n_features, - metric, - self.model_class, - hyper_parameters=params_dict) - except AssertionError: - train_scores, valid_scores, _ = benchmark_regression( - train_dataset, - valid_dataset, - valid_dataset, ['task_placeholder'] * n_tasks, - transformers, - n_features, - metric, - self.model_class, - hyper_parameters=params_dict) - score = valid_scores[self.model_class][metric[0].name] + if log_file: with open(log_file, 'a') as f: - # Record performances - f.write(str(score)) + # Record hyperparameters + f.write(str(params_dict)) f.write('\n') - if not use_max: - score = -score - if score > valid_performance_opt: - # Optimized model is better, return hyperparameters - return params_dict, score # Return default hyperparameters return hyper_parameters, valid_performance_opt diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index dcadabac56..344e89ffd5 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -13,6 +13,7 @@ from deepchem.utils.evaluate import Evaluator from deepchem.hyper.base_classes import HyperparamOpt +logger = logging.getLogger(__name__) class GridHyperparamOpt(HyperparamOpt): """ @@ -98,8 +99,8 @@ def hyperparam_search(self, model_dir = tempfile.mkdtemp() else: model_dir = tempfile.mkdtemp() - - model = self.model_class(model_params, model_dir) + model_params['model_dir'] = model_dir + model = self.model_class(**model_params) model.fit(train_dataset) evaluator = Evaluator(model, valid_dataset, output_transformers) diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 1e292e29b1..503e0aea18 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -23,7 +23,7 @@ def rf_model_builder(model_params, model_dir): valid_dataset = dc.data.NumpyDataset( X=np.random.rand(20, 5), y=np.random.rand(20, 1)) optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) - params_dict = {"n_estimators": [10, 100]} + params_dict = {"n_estimators": 10} transformers = [ dc.trans.NormalizationTransformer( transform_y=True, dataset=train_dataset) diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py index d75f0c7888..b533ee9c3b 100644 --- a/deepchem/hyper/tests/test_grid_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -43,8 +43,10 @@ def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): params_dict = {"n_estimators": [10, 100]} metric = dc.metrics.Metric(dc.metrics.r2_score) - def rf_model_builder(model_params, model_dir): - sklearn_model = RandomForestRegressor(**model_params) + def rf_model_builder(**model_params): + rf_params = {k:v for (k,v) in model_params.items() if k != 
'model_dir'} + model_dir = model_params['model_dir'] + sklearn_model = RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) @@ -91,10 +93,11 @@ def test_singletask_to_multitask_sklearn_hyperparam_opt(self): dc.metrics.matthews_corrcoef, np.mean, mode="classification") params_dict = {"n_estimators": [1, 10]} - def multitask_model_builder(model_params, model_dir): - + def multitask_model_builder(**model_params): + rf_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + model_dir = model_params['model_dir'] def model_builder(model_dir): - sklearn_model = RandomForestClassifier(**model_params) + sklearn_model = RandomForestClassifier(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) return dc.models.SingletaskToMultitask(tasks, model_builder, model_dir) @@ -137,9 +140,11 @@ def test_multitask_tf_mlp_ECFP_classification_hyperparam_opt(self): dc.metrics.roc_auc_score, np.mean, mode="classification") params_dict = {"layer_sizes": [(10,), (100,)]} - def model_builder(model_params, model_dir): + def model_builder(**model_params): + model_dir = model_params['model_dir'] + multitask_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} return dc.models.MultitaskClassifier( - len(tasks), n_features, model_dir=model_dir, **model_params) + len(tasks), n_features, model_dir=model_dir, **multitask_params) optimizer = dc.hyper.GridHyperparamOpt(model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( From f4bc57459e575c7111f50a2744c8054d3d43f0d5 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Mon, 22 Jun 2020 16:00:33 -0700 Subject: [PATCH 07/23] Cleanup --- deepchem/hyper/base_classes.py | 69 +--------------- deepchem/hyper/gaussian_process.py | 123 ++++++++++++++++++++++++++--- 2 files changed, 113 insertions(+), 79 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index d3e16ac318..513f18df70 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -1,67 +1,4 @@ -def compute_parameter_range(params_dict, search_range): - """Convenience Function to compute parameter search space. - - Parameters - ---------- - params_dict: dict - Dictionary mapping strings to Ints/Floats/Lists. For those - parameters in which int/float is specified, an explicit list of - parameters is computed with `search_range`. - search_range: int(float) (default 4) - For int/float values in `params_dict`, computes optimization range - on `[initial values / search_range, initial values * - search_range]` - - Returns - ------- - expanded_params: dict - Expanded dictionary of parameters where all int/float values in - `params_dict` are expanded out into explicit search ranges. - """ - hp_list = list(params_dict.keys()) - - hp_list_class = [params_dict[hp].__class__ for hp in hp_list] - # Check the type is correct - if not (set(hp_list_class) <= set([list, int, float])): - raise ValueError("params_dict must contain values that are lists/ints/floats.") - - # Float or int hyper parameters(ex. batch_size, learning_rate) - hp_list_single = [ - hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list - ] - - # List of float or int hyper parameters(ex. 
layer_sizes) - hp_list_multiple = [(hp_list[i], len(params_dict[hp_list[i]])) - for i in range(len(hp_list)) - if hp_list_class[i] is list] - - # Range of optimization - param_range = [] - for hp in hp_list_single: - if params_dict[hp].__class__ is int: - param_range.append((('int'), [ - params_dict[hp] // search_range, - params_dict[hp] * search_range - ])) - else: - param_range.append((('cont'), [ - params_dict[hp] / search_range, - params_dict[hp] * search_range - ])) - for hp in hp_list_multiple: - if params_dict[hp[0]][0].__class__ is int: - param_range.extend([(('int'), [ - params_dict[hp[0]][i] // search_range, - params_dict[hp[0]][i] * search_range - ]) for i in range(hp[1])]) - else: - param_range.extend([(('cont'), [ - params_dict[hp[0]][i] / search_range, - params_dict[hp[0]][i] * search_range - ]) for i in range(hp[1])]) - return hp_list_single, hp_list_multiple, param_range - class HyperparamOpt(object): """Abstract superclass for hyperparameter search classes. @@ -127,9 +64,9 @@ def hyperparam_search(self, Parameters ---------- params_dict: dict - Dictionary mapping strings to Ints/Floats/Lists. For those - parameters in which int/float is specified, an explicit list of - parameters is computed with `search_range`. + Dictionary mapping strings to Ints/Floats/Lists. Note that the + precise semantics of `params_dict` will change depending on the + optimizer that you're using. train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index b3baa3e885..50323b032c 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -12,17 +12,108 @@ logger = logging.getLogger(__name__) +def compute_parameter_range(params_dict, search_range): + """Convenience Function to compute parameter search space. + + Parameters + ---------- + params_dict: dict + Dictionary mapping strings to Ints/Floats/Lists. For those + parameters in which int/float is specified, an explicit list of + parameters is computed with `search_range`. + search_range: int(float) (default 4) + For int/float values in `params_dict`, computes optimization range + on `[initial values / search_range, initial values * + search_range]` + + Returns + ------- + param_range: list + List of tuples. Each tuple is of form `(value_type, value_range)` + where `value_type` is a string that is either "int" or "cont" and + `value_range` is a list of two elements of the form `[low, hi]` + """ + #hp_list = list(params_dict.keys()) + + #hp_list_class = [params_dict[hp].__class__ for hp in hp_list] + ## Check the type is correct + #if not (set(hp_list_class) <= set([list, int, float])): + # raise ValueError("params_dict must contain values that are lists/ints/floats.") + + ## Float or int hyper parameters(ex. batch_size, learning_rate) + #hp_list_single = [ + # hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list + #] + + ## List of float or int hyper parameters(ex. 
layer_sizes) + #hp_list_multiple = [(hp_list[i], len(params_dict[hp_list[i]])) + # for i in range(len(hp_list)) + # if hp_list_class[i] is list] + + # Range of optimization + param_range = [] + for hp, value in params_dict.items(): + if isinstance(value, int): + value_range = [value // search_range, value * search_range] + param_range.append(("int", value_range)) + pass + elif isinstance(value, float): + value_range = [value / search_range, value * search_range] + param_range.append(("cont", value_range)) + pass + elif isinstance(value, list): + if len(value) == 0: + raise ValueError("Cannot specify empty lists for hyperparameter search.") + if isinstance(value[0], int): + # Expand out each of the possible values into a range + for val in value: + value_range = [value // search_range, value * search_range] + param_range.append(("int", value_range)) + + elif isinstance(value[0], float): + for val in value: + value_range = [value / search_range, value * search_range] + param_range.append(("cont", value_range)) + return param_range + + #for hp in hp_list_single: + # if params_dict[hp].__class__ is int: + # param_range.append((('int'), [ + # params_dict[hp] // search_range, + # params_dict[hp] * search_range + # ])) + # else: + # param_range.append((('cont'), [ + # params_dict[hp] / search_range, + # params_dict[hp] * search_range + # ])) + #for hp in hp_list_multiple: + # if params_dict[hp[0]][0].__class__ is int: + # param_range.extend([(('int'), [ + # params_dict[hp[0]][i] // search_range, + # params_dict[hp[0]][i] * search_range + # ]) for i in range(hp[1])]) + # else: + # param_range.extend([(('cont'), [ + # params_dict[hp[0]][i] / search_range, + # params_dict[hp[0]][i] * search_range + # ]) for i in range(hp[1])]) + ##return hp_list_single, hp_list_multiple, param_range + #return param_range + class GaussianProcessHyperparamOpt(HyperparamOpt): """ Gaussian Process Global Optimization(GPGO) This class uses Gaussian Process optimization to select - hyperparameters. Note that this class can only optimize 20 - parameters at a time. + hyperparameters. Underneath the hood it uses pyGPGO to optimize + models. If you don't have pyGPGO installed, you won't be able to use + this class. - TODO: This class is too tied up with the MoleculeNet benchmarking. - This needs to be refactored out cleanly. + Note + ---- + This class can only optimize 20 parameters at a time. 
""" def hyperparam_search( @@ -83,23 +174,29 @@ def hyperparam_search( if logfile: log_file = logfile elif logdir is not None: - log_file = os.path.join(model_dir, log_file) + log_file = os.path.join(logdir, log_file) else: log_file = None - hyper_parameters = params_dict - hp_list_single, hp_list_multiple, param_range = compute_parameter_range(params_dict, search_range) + #hyper_parameters = params_dict + param_range = compute_parameter_range(params_dict, search_range) - # Number of parameters - n_param = len(hp_list_single) - if len(hp_list_multiple) > 0: - n_param = n_param + sum([hp[1] for hp in hp_list_multiple]) + ## Number of parameters + #n_param = len(hp_list_single) + #if len(hp_list_multiple) > 0: + # n_param = n_param + sum([hp[1] for hp in hp_list_multiple]) + # Compute number of different params + n_param = 0 + for val in params_dict.items(): + if isinstance(val, list): + n_param += len(val) + else: + n_param += 1 # Dummy names param_name = ['l' + format(i, '02d') for i in range(20)] param = dict(zip(param_name[:n_param], param_range)) - def f(l00=0, l01=0, l02=0, @@ -120,7 +217,7 @@ def f(l00=0, l17=0, l18=0, l19=0): - """ Optimizing function + """Private Optimizing function Take in hyper parameter values and return valid set performances From 39f73650f12e947ec76e4804470dfa8c2587516a Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 23 Jun 2020 19:55:13 -0700 Subject: [PATCH 08/23] Changes --- deepchem/hyper/base_classes.py | 1 - deepchem/hyper/gaussian_process.py | 225 +++++++++--------- deepchem/hyper/grid_search.py | 6 + .../tests/test_gaussian_hyperparam_opt.py | 14 +- .../hyper/tests/test_grid_hyperparam_opt.py | 17 +- deepchem/hyper/tests/test_hyperparam_opt.py | 2 +- docs/hyper.rst | 7 + 7 files changed, 135 insertions(+), 137 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 513f18df70..699ceee53b 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -1,4 +1,3 @@ - class HyperparamOpt(object): """Abstract superclass for hyperparameter search classes. diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 50323b032c..a541918485 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -6,12 +6,37 @@ import tempfile import os import deepchem -from deepchem.hyper.base_classes import compute_parameter_range from deepchem.hyper.base_classes import HyperparamOpt from deepchem.utils.evaluate import Evaluator logger = logging.getLogger(__name__) + +def _convert_hyperparam_dict_to_filename(hyper_params): + """Helper function that converts a dictionary of hyperparameters to a string that can be a filename. + + Parameters + ---------- + hyper_params: dict + Maps string of hyperparameter name to int/float/list. + + Returns + ------- + filename: str + A filename of form "_key1_value1_value2_..._key2..." + """ + filename = "" + keys = sorted(hyper_params.keys()) + for key in keys: + filename += "_%s" % str(key) + value = hyper_params[key] + if isinstance(value, int): + filename += "_%s" % str(value) + else: + filename += "_%.2f" % value + return filename + + def compute_parameter_range(params_dict, search_range): """Convenience Function to compute parameter search space. @@ -28,78 +53,24 @@ def compute_parameter_range(params_dict, search_range): Returns ------- - param_range: list - List of tuples. 
Each tuple is of form `(value_type, value_range)` - where `value_type` is a string that is either "int" or "cont" and - `value_range` is a list of two elements of the form `[low, hi]` + param_range: dict + Dictionary mapping hyperparameter names to tuples. Each tuple is + of form `(value_type, value_range)` where `value_type` is a string + that is either "int" or "cont" and `value_range` is a list of two + elements of the form `[low, hi]` """ - #hp_list = list(params_dict.keys()) - - #hp_list_class = [params_dict[hp].__class__ for hp in hp_list] - ## Check the type is correct - #if not (set(hp_list_class) <= set([list, int, float])): - # raise ValueError("params_dict must contain values that are lists/ints/floats.") - - ## Float or int hyper parameters(ex. batch_size, learning_rate) - #hp_list_single = [ - # hp_list[i] for i in range(len(hp_list)) if not hp_list_class[i] is list - #] - - ## List of float or int hyper parameters(ex. layer_sizes) - #hp_list_multiple = [(hp_list[i], len(params_dict[hp_list[i]])) - # for i in range(len(hp_list)) - # if hp_list_class[i] is list] - # Range of optimization - param_range = [] + param_range = {} for hp, value in params_dict.items(): if isinstance(value, int): value_range = [value // search_range, value * search_range] - param_range.append(("int", value_range)) + param_range[hp] = ("int", value_range) pass elif isinstance(value, float): value_range = [value / search_range, value * search_range] - param_range.append(("cont", value_range)) + param_range[hp] = ("cont", value_range) pass - elif isinstance(value, list): - if len(value) == 0: - raise ValueError("Cannot specify empty lists for hyperparameter search.") - if isinstance(value[0], int): - # Expand out each of the possible values into a range - for val in value: - value_range = [value // search_range, value * search_range] - param_range.append(("int", value_range)) - - elif isinstance(value[0], float): - for val in value: - value_range = [value / search_range, value * search_range] - param_range.append(("cont", value_range)) return param_range - - #for hp in hp_list_single: - # if params_dict[hp].__class__ is int: - # param_range.append((('int'), [ - # params_dict[hp] // search_range, - # params_dict[hp] * search_range - # ])) - # else: - # param_range.append((('cont'), [ - # params_dict[hp] / search_range, - # params_dict[hp] * search_range - # ])) - #for hp in hp_list_multiple: - # if params_dict[hp[0]][0].__class__ is int: - # param_range.extend([(('int'), [ - # params_dict[hp[0]][i] // search_range, - # params_dict[hp[0]][i] * search_range - # ]) for i in range(hp[1])]) - # else: - # param_range.extend([(('cont'), [ - # params_dict[hp[0]][i] / search_range, - # params_dict[hp[0]][i] * search_range - # ]) for i in range(hp[1])]) - ##return hp_list_single, hp_list_multiple, param_range - #return param_range class GaussianProcessHyperparamOpt(HyperparamOpt): @@ -111,29 +82,36 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): models. If you don't have pyGPGO installed, you won't be able to use this class. + Note that `params_dict` has a different semantics than for + `GridHyperparamOpt`. `param_dict[hp]` must be an int/float and is + used as the center of a search range. + Note ---- This class can only optimize 20 parameters at a time. 
""" - def hyperparam_search( - self, - params_dict, - train_dataset, - valid_dataset, - transformers, - metric, - use_max=True, - logdir=None, - max_iter=20, - search_range=4, - logfile=None): + def hyperparam_search(self, + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + use_max=True, + logdir=None, + max_iter=20, + search_range=4, + logfile=None): """Perform hyperparameter search using a gaussian process. Parameters ---------- params_dict: dict - dict including parameters and their initial values + Maps hyperparameter names (strings) to possible parameter + values. The semantics of this list are different than for + `GridHyperparamOpt`. `params_dict[hp]` must map to an int/float, + which is used as the center of a search with radius + `search_range`. train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` @@ -168,7 +146,8 @@ def hyperparam_search( scores. """ if len(params_dict) > 20: - raise ValueError("This class can only search over 20 parameters in one invocation.") + raise ValueError( + "This class can only search over 20 parameters in one invocation.") data_dir = deepchem.utils.get_data_dir() # Specify logfile if logfile: @@ -178,14 +157,11 @@ def hyperparam_search( else: log_file = None - #hyper_parameters = params_dict param_range = compute_parameter_range(params_dict, search_range) + param_range_keys = list(param_range.keys()) + param_range_values = [param_range[key] for key in param_range_keys] - ## Number of parameters - #n_param = len(hp_list_single) - #if len(hp_list_multiple) > 0: - # n_param = n_param + sum([hp[1] for hp in hp_list_multiple]) - # Compute number of different params + # Number of parameters n_param = 0 for val in params_dict.items(): if isinstance(val, list): @@ -195,8 +171,14 @@ def hyperparam_search( # Dummy names param_name = ['l' + format(i, '02d') for i in range(20)] - param = dict(zip(param_name[:n_param], param_range)) + # This is the dictionary of arguments we'll pass to pyGPGO + param = dict(zip(param_name[:n_param], param_range_values)) + + # Stores all results + all_results = {} + # Demarcating internal function for readability + ######################## def f(l00=0, l01=0, l02=0, @@ -232,23 +214,19 @@ def f(l00=0, valid_scores: float valid set performances """ + hyper_parameters = {} + # This is a dictionary of form {'l01': val, ...} binding + # arguments args = locals() - # Input hyper parameters - i = 0 - for hp in hp_list_single: - hyper_parameters[hp] = float(args[param_name[i]]) - if param_range[i][0] == 'int': - hyper_parameters[hp] = int(hyper_parameters[hp]) - i = i + 1 - for hp in hp_list_multiple: - hyper_parameters[hp[0]] = [ - float(args[param_name[j]]) for j in range(i, i + hp[1]) - ] - if param_range[i][0] == 'int': - hyper_parameters[hp[0]] = list(map(int, hyper_parameters[hp[0]])) - i = i + hp[1] - - logger.info(hyper_parameters) + # This bit of code re-associates hyperparameter values to their + # names from the arguments of this local function. 
+ for i, hp in enumerate(param_range_keys): + if isinstance(params_dict[hp], int): + hyper_parameters[hp] = int(args[param_name[i]]) + elif isinstance(params_dict[hp], float): + hyper_parameters[hp] = float(args[param_name[i]]) + + logger.info("Running hyperparameter set: %s" % str(hyper_parameters)) if log_file: # Run benchmark with open(log_file, 'a') as f: @@ -256,9 +234,10 @@ def f(l00=0, f.write(str(hyper_parameters)) f.write('\n') - + hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) if logdir is not None: - model_dir = os.path.join(logdir, str(ind)) + filename = "model%s" % hp_str + model_dir = os.path.join(logdir, filename) logger.info("model_dir is %s" % model_dir) try: os.makedirs(model_dir) @@ -268,9 +247,16 @@ def f(l00=0, model_dir = tempfile.mkdtemp() else: model_dir = tempfile.mkdtemp() - model = self.model_class(hyper_parameters, model_dir) - model.fit(train_dataset, **hyper_parameters) - model.save() + # Add it on to the information needed for the constructor + hyper_parameters["model_dir"] = model_dir + model = self.model_class(**hyper_parameters) + model.fit(train_dataset) + try: + model.save() + # Some models autosave + except NotImplementedError: + pass + evaluator = Evaluator(model, valid_dataset, transformers) multitask_scores = evaluator.compute_model_performance([metric]) score = multitask_scores[metric.name] @@ -280,12 +266,16 @@ def f(l00=0, # Record performances f.write(str(score)) f.write('\n') + # Store all results + all_results[hp_str] = score # GPGO maximize performance by default, set performance to its negative value for minimization if use_max: return score else: return -score + ######################## + import pyGPGO from pyGPGO.covfunc import matern32 from pyGPGO.acquisition import Acquisition @@ -300,19 +290,16 @@ def f(l00=0, hp_opt, valid_performance_opt = gpgo.getResult() # Readout best hyper parameters - i = 0 - for hp in hp_list_single: - hyper_parameters[hp] = float(hp_opt[param_name[i]]) - if param_range[i][0] == 'int': - hyper_parameters[hp] = int(hyper_parameters[hp]) - i = i + 1 - for hp in hp_list_multiple: - hyper_parameters[hp[0]] = [ - float(hp_opt[param_name[j]]) for j in range(i, i + hp[1]) - ] - if param_range[i][0] == 'int': - hyper_parameters[hp[0]] = list(map(int, hyper_parameters[hp[0]])) - i = i + hp[1] + hyper_parameters = {} + for i, hp in enumerate(param_range_keys): + if isinstance(params_dict[hp], int): + hyper_parameters[hp] = int(hp_opt[param_name[i]]) + elif isinstance(params_dict[hp], float): + hyper_parameters[hp] = float(hp_opt[param_name[i]]) + hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) + model_dir = "model%s" % hp_str + hyper_parameters["model_dir"] = model_dir + best_model = self.model_class(**hyper_parameters) # Compare best model to default hyperparameters if log_file: @@ -322,4 +309,4 @@ def f(l00=0, f.write('\n') # Return default hyperparameters - return hyper_parameters, valid_performance_opt + return best_model, hyper_parameters, all_results diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index 344e89ffd5..f0206f2a22 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -15,6 +15,7 @@ logger = logging.getLogger(__name__) + class GridHyperparamOpt(HyperparamOpt): """ Provides simple grid hyperparameter search capabilities. 
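For reference, a usage sketch of the grid search entry point these hunks modify (an illustration only, not part of the diff): at this point in the series `GridHyperparamOpt.hyperparam_search` returns a `(best_model, best_hyperparams, all_results)` triple and calls the builder as `self.model_class(**model_params)` with `model_dir` folded into `model_params`. The toy datasets and the `n_estimators` grid below are placeholders; the builder follows the pattern used in the tests and examples of this series.

import numpy as np
import sklearn.ensemble
import deepchem as dc

# Toy regression data standing in for a real featurized dataset.
train = dc.data.NumpyDataset(X=np.random.rand(50, 5), y=np.random.rand(50, 1))
valid = dc.data.NumpyDataset(X=np.random.rand(20, 5), y=np.random.rand(20, 1))
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)


def rf_model_builder(**model_params):
  # The optimizer passes 'model_dir' in alongside the hyperparameters.
  rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'}
  model_dir = model_params['model_dir']
  sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params)
  return dc.models.SklearnModel(sklearn_model, model_dir)


params_dict = {"n_estimators": [10, 100]}
optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder)
best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
    params_dict, train, valid, [], metric)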
@@ -102,6 +103,11 @@ def hyperparam_search(self, model_params['model_dir'] = model_dir model = self.model_class(**model_params) model.fit(train_dataset) + try: + model.save() + # Some models autosave + except NotImplementedError: + pass evaluator = Evaluator(model, valid_dataset, output_transformers) multitask_scores = evaluator.compute_model_performance([metric]) diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 503e0aea18..17c5586542 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -14,8 +14,10 @@ class TestGaussianHyperparamOpt(unittest.TestCase): def test_rf_example(self): - def rf_model_builder(model_params, model_dir): - sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) + def rf_model_builder(**model_params): + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} + model_dir = model_params['model_dir'] + sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) train_dataset = dc.data.NumpyDataset( @@ -28,11 +30,7 @@ def rf_model_builder(model_params, model_dir): dc.trans.NormalizationTransformer( transform_y=True, dataset=train_dataset) ] - metric = dc.metrics.Metric(dc.metrics.r2_score) + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric) + params_dict, train_dataset, valid_dataset, transformers, metric) diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py index b533ee9c3b..c95af0219d 100644 --- a/deepchem/hyper/tests/test_grid_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -18,7 +18,7 @@ class TestGridHyperparamOpt(unittest.TestCase): Test grid hyperparameter optimization API. 
""" - def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): + def test_rf_hyperparam(self): """Test of hyperparam_opt with singletask RF ECFP regression API.""" featurizer = dc.feat.CircularFingerprint(size=1024) tasks = ["log-solubility"] @@ -44,7 +44,7 @@ def test_singletask_sklearn_rf_ECFP_regression_hyperparam_opt(self): metric = dc.metrics.Metric(dc.metrics.r2_score) def rf_model_builder(**model_params): - rf_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} model_dir = model_params['model_dir'] sklearn_model = RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) @@ -58,7 +58,7 @@ def rf_model_builder(**model_params): metric, logdir=None) - def test_singletask_to_multitask_sklearn_hyperparam_opt(self): + def test_multitask_rf_hyperparam_opt(self): """Test of hyperparam_opt with singletask_to_multitask.""" tasks = [ "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", @@ -94,8 +94,9 @@ def test_singletask_to_multitask_sklearn_hyperparam_opt(self): params_dict = {"n_estimators": [1, 10]} def multitask_model_builder(**model_params): - rf_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} model_dir = model_params['model_dir'] + def model_builder(model_dir): sklearn_model = RandomForestClassifier(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) @@ -111,10 +112,8 @@ def model_builder(model_dir): classification_metric, logdir=None) - def test_multitask_tf_mlp_ECFP_classification_hyperparam_opt(self): + def test_mlp_hyperparam_opt(self): """Straightforward test of Tensorflow multitask deepchem classification API.""" - task_type = "classification" - current_dir = os.path.dirname(os.path.abspath(__file__)) input_file = os.path.join(current_dir, "../../models/tests/multitask_example.csv") @@ -142,7 +141,9 @@ def test_multitask_tf_mlp_ECFP_classification_hyperparam_opt(self): def model_builder(**model_params): model_dir = model_params['model_dir'] - multitask_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + multitask_params = { + k: v for (k, v) in model_params.items() if k != 'model_dir' + } return dc.models.MultitaskClassifier( len(tasks), n_features, model_dir=model_dir, **multitask_params) diff --git a/deepchem/hyper/tests/test_hyperparam_opt.py b/deepchem/hyper/tests/test_hyperparam_opt.py index 1507133a9a..92ce092148 100644 --- a/deepchem/hyper/tests/test_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_hyperparam_opt.py @@ -6,6 +6,7 @@ import sklearn import deepchem as dc + class TestHyperparamOpt(unittest.TestCase): """ Test abstract superclass behavior. @@ -24,4 +25,3 @@ def rf_model_builder(model_params, model_dir): except: initialized = False assert not initialized - diff --git a/docs/hyper.rst b/docs/hyper.rst index 8d4de51d83..8809814741 100644 --- a/docs/hyper.rst +++ b/docs/hyper.rst @@ -8,6 +8,13 @@ learning algorithm used for the rest of learning and have to be set in an alternate fashion. The :code:`dc.hyper` module contains utilities for hyperparameter tuning. +DeepChem's hyperparameter optimzation algorithms are simple and run in +single-threaded fashion. They are not intended to be production grade +hyperparameter utilities, but rather useful first tools as you start +exploring your parameter space. 
As the needs of your application grow, +we recommend swapping to a more hyeavy duty hyperparameter +optimization library. + Hyperparameter Optimization API ------------------------------- From 48d2084a00e4c7dc7e2cda8d52a101a03100980f Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 23 Jun 2020 20:17:42 -0700 Subject: [PATCH 09/23] Changes --- deepchem/hyper/gaussian_process.py | 1 - .../hyperparam_opt/gaussian_hyperparam_opt.py | 16 ++++++++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index a541918485..1cf2a026b6 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -148,7 +148,6 @@ def hyperparam_search(self, if len(params_dict) > 20: raise ValueError( "This class can only search over 20 parameters in one invocation.") - data_dir = deepchem.utils.get_data_dir() # Specify logfile if logfile: log_file = logfile diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py index 4dfdc25906..c70c319a0c 100644 --- a/examples/hyperparam_opt/gaussian_hyperparam_opt.py +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -11,8 +11,16 @@ # Fit models regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) -# TODO(rbharath): I don't like this awkward string/class divide. Maybe clean up? -optimizer = dc.hyper.GaussianProcessHyperparamOpt('tf_regression') +def rf_model_builder(**model_params): + rf_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + model_dir = model_params['model_dir'] + sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + +optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) best_hyper_params, best_performance = optimizer.hyperparam_search( - dc.molnet.preset_hyper_parameters.hps['tf_regression'], train, valid, - transformers, [regression_metric]) + params_dict, + train_dataset, + valid_dataset, + transformers, + metric) From 75167dfc27f5fda860340f1d6b041e37868e8e73 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 23 Jun 2020 21:15:16 -0700 Subject: [PATCH 10/23] Fix examples --- .../hyperparam_opt/gaussian_hyperparam_opt.py | 16 ++++++------ .../hyperparam_opt/grid_hyperparam_opt.py | 26 +++++++++++++++++++ 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py index c70c319a0c..0b55e8493e 100644 --- a/examples/hyperparam_opt/gaussian_hyperparam_opt.py +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -3,24 +3,24 @@ import tensorflow as tf tf.random.set_seed(123) import deepchem as dc +import sklearn # Load delaney dataset delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() train, valid, test = delaney_datasets # Fit models -regression_metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + def rf_model_builder(**model_params): - rf_params = {k:v for (k,v) in model_params.items() if k != 'model_dir'} + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} model_dir = model_params['model_dir'] sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) + +params_dict = {"n_estimators": 30} optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) 
-best_hyper_params, best_performance = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric) +best_model, best_params, all_results = optimizer.hyperparam_search( + params_dict, train, valid, transformers, metric) diff --git a/examples/hyperparam_opt/grid_hyperparam_opt.py b/examples/hyperparam_opt/grid_hyperparam_opt.py index e69de29bb2..ae0286a7e4 100644 --- a/examples/hyperparam_opt/grid_hyperparam_opt.py +++ b/examples/hyperparam_opt/grid_hyperparam_opt.py @@ -0,0 +1,26 @@ +import numpy as np +np.random.seed(123) +import tensorflow as tf +tf.random.set_seed(123) +import deepchem as dc +import sklearn + +# Load delaney dataset +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +train, valid, test = delaney_datasets + +# Fit models +metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + +def rf_model_builder(**model_params): + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} + model_dir = model_params['model_dir'] + sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + +params_dict = {"n_estimators": [10, 30, 50, 100]} +optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) +best_model, best_params, all_results = optimizer.hyperparam_search( + params_dict, train, valid, transformers, metric) From 0539b81fd6737c30e0551db7c351845a9f51628e Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 23 Jun 2020 21:18:33 -0700 Subject: [PATCH 11/23] Fix tests --- scripts/install_deepchem_conda.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/install_deepchem_conda.sh b/scripts/install_deepchem_conda.sh index 2cdcf95577..5861e09ed8 100644 --- a/scripts/install_deepchem_conda.sh +++ b/scripts/install_deepchem_conda.sh @@ -38,5 +38,5 @@ conda install -y -q -c deepchem -c rdkit -c conda-forge -c omnia \ pytest \ pytest-cov \ flaky - +yes | pip install pyGPGO yes | pip install -U matminer tensorflow==2.2 tensorflow-probability==0.10 From cfcc7a4a1fd028400f624fc098584772af1cd33a Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Wed, 24 Jun 2020 12:13:12 -0700 Subject: [PATCH 12/23] doc --- deepchem/hyper/base_classes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 699ceee53b..3bf5d5dd2e 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -28,8 +28,8 @@ def __init__(self, model_class): >>> import sklearn >>> import deepchem as dc >>> def rf_model_builder(model_params, model_dir): - sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) - return dc.models.SklearnModel(sklearn_model, model_dir) + ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) + ... 
return dc.models.SklearnModel(sklearn_model, model_dir) Parameters ---------- From ef070999f242cfcf1013e4891e3c298d04edaf64 Mon Sep 17 00:00:00 2001 From: nd-02110114 Date: Thu, 25 Jun 2020 01:08:11 +0900 Subject: [PATCH 13/23] :recycle: refactor --- deepchem/hyper/gaussian_process.py | 72 +++++++----------------------- 1 file changed, 16 insertions(+), 56 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 1cf2a026b6..916d79ed7b 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -145,9 +145,6 @@ def hyperparam_search(self, string representations of hyperparameter sets to validation scores. """ - if len(params_dict) > 20: - raise ValueError( - "This class can only search over 20 parameters in one invocation.") # Specify logfile if logfile: log_file = logfile @@ -156,48 +153,16 @@ def hyperparam_search(self, else: log_file = None + # setup range param_range = compute_parameter_range(params_dict, search_range) - param_range_keys = list(param_range.keys()) - param_range_values = [param_range[key] for key in param_range_keys] - - # Number of parameters - n_param = 0 - for val in params_dict.items(): - if isinstance(val, list): - n_param += len(val) - else: - n_param += 1 - - # Dummy names - param_name = ['l' + format(i, '02d') for i in range(20)] - # This is the dictionary of arguments we'll pass to pyGPGO - param = dict(zip(param_name[:n_param], param_range_values)) + param_keys = list(param_range.keys()) # Stores all results all_results = {} # Demarcating internal function for readability ######################## - def f(l00=0, - l01=0, - l02=0, - l03=0, - l04=0, - l05=0, - l06=0, - l07=0, - l08=0, - l09=0, - l10=0, - l11=0, - l12=0, - l13=0, - l14=0, - l15=0, - l16=0, - l17=0, - l18=0, - l19=0): + def f(**placeholders): """Private Optimizing function Take in hyper parameter values and return valid set performances @@ -214,17 +179,13 @@ def f(l00=0, valid set performances """ hyper_parameters = {} - # This is a dictionary of form {'l01': val, ...} binding - # arguments - args = locals() - # This bit of code re-associates hyperparameter values to their - # names from the arguments of this local function. 
- for i, hp in enumerate(param_range_keys): - if isinstance(params_dict[hp], int): - hyper_parameters[hp] = int(args[param_name[i]]) - elif isinstance(params_dict[hp], float): - hyper_parameters[hp] = float(args[param_name[i]]) - + for hp in param_keys: + if param_range[hp][0] == "int": + # param values are always float in BO, so this line converts float to int + # see : https://github.com/josejimenezluna/pyGPGO/issues/10 + hyper_parameters[hp] = int(placeholders[hp]) + else: + hyper_parameters[hp] = float(placeholders[hp]) logger.info("Running hyperparameter set: %s" % str(hyper_parameters)) if log_file: # Run benchmark @@ -283,18 +244,17 @@ def f(l00=0, cov = matern32() gp = GaussianProcess(cov) acq = Acquisition(mode='ExpectedImprovement') - gpgo = GPGO(gp, acq, f, param) + gpgo = GPGO(gp, acq, f, param_range) logger.info("Max number of iteration: %i" % max_iter) gpgo.run(max_iter=max_iter) hp_opt, valid_performance_opt = gpgo.getResult() - # Readout best hyper parameters hyper_parameters = {} - for i, hp in enumerate(param_range_keys): - if isinstance(params_dict[hp], int): - hyper_parameters[hp] = int(hp_opt[param_name[i]]) - elif isinstance(params_dict[hp], float): - hyper_parameters[hp] = float(hp_opt[param_name[i]]) + for hp in param_keys: + if param_range[hp][0] == "int": + hyper_parameters[hp] = int(hp_opt[hp]) + else: + hyper_parameters[hp] = float(hp_opt[hp]) hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) model_dir = "model%s" % hp_str hyper_parameters["model_dir"] = model_dir From 2ef0e4b77a137e13d5cdfa635aef3d80959a47bd Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Wed, 24 Jun 2020 14:45:16 -0700 Subject: [PATCH 14/23] changes --- deepchem/hyper/base_classes.py | 18 ++----- deepchem/hyper/gaussian_process.py | 44 +++++++++++----- deepchem/hyper/grid_search.py | 16 +++++- .../tests/test_gaussian_hyperparam_opt.py | 50 ++++++++++++++++--- deepchem/models/models.py | 2 +- deepchem/models/sklearn_models/__init__.py | 2 +- docs/hyper.rst | 2 +- .../gaussian_hyperparam_opt_with_logdir.py | 32 ++++++++++++ scripts/install_deepchem_conda.ps1 | 2 +- 9 files changed, 131 insertions(+), 37 deletions(-) create mode 100644 examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 3bf5d5dd2e..6618cb20b3 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -15,27 +15,17 @@ class itself is an abstract superclass and should never be directly instantiated. """ - def __init__(self, model_class): + def __init__(self, model_builder): """Initialize Hyperparameter Optimizer. Note this is an abstract constructor which should only be used by subclasses. - Example - ------- - This example shows the type of constructor function expected. - - >>> import sklearn - >>> import deepchem as dc - >>> def rf_model_builder(model_params, model_dir): - ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params) - ... return dc.models.SklearnModel(sklearn_model, model_dir) - Parameters ---------- - model_class: constructor function. + model_builder: constructor function. This parameter must be constructor function which returns an - object which is an instance of `dc.model.Model`. This function + object which is an instance of `dc.models.Model`. This function must accept two arguments, `model_params` of type `dict` and `model_dir`, a string specifying a path to a model directory. See the example. 
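For reference, a minimal sketch of a builder satisfying this constructor contract, following the keyword-argument convention used by the tests and examples later in this series (illustration only, not part of the diff). Note that the docstring above still describes the older two-argument `(model_params, model_dir)` form, while the concrete optimizers at this point call the builder as `self.model_builder(**model_params)` with `model_dir` included.

import sklearn.ensemble
import deepchem as dc


def rf_model_builder(**model_params):
  # 'model_dir' arrives mixed in with the hyperparameters, so split it out.
  rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'}
  model_dir = model_params['model_dir']
  sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params)
  return dc.models.SklearnModel(sklearn_model, model_dir)


# Either concrete subclass accepts such a builder:
optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder)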
@@ -44,7 +34,7 @@ def __init__(self, model_class): raise ValueError( "HyperparamOpt is an abstract superclass and cannot be directly instantiated. You probably want to instantiate a concrete subclass instead." ) - self.model_class = model_class + self.model_builder = model_builder def hyperparam_search(self, params_dict, diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 916d79ed7b..c529a692a2 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -86,9 +86,19 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): `GridHyperparamOpt`. `param_dict[hp]` must be an int/float and is used as the center of a search range. - Note - ---- - This class can only optimize 20 parameters at a time. + Example + ------- + This example shows the type of constructor function expected. + + >>> import sklearn + >>> import deepchem as dc + >>> def rf_model_builder(**model_params): + ... rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} + ... model_dir = model_params['model_dir'] + ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) + ... return dc.models.SklearnModel(sklearn_model, model_dir) + >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) + """ def hyperparam_search(self, @@ -149,7 +159,7 @@ def hyperparam_search(self, if logfile: log_file = logfile elif logdir is not None: - log_file = os.path.join(logdir, log_file) + log_file = os.path.join(logdir, "results.txt") else: log_file = None @@ -159,19 +169,20 @@ def hyperparam_search(self, # Stores all results all_results = {} + # Stores all model locations + model_locations = {} # Demarcating internal function for readability ######################## - def f(**placeholders): + def optimizing_function(**placeholders): """Private Optimizing function Take in hyper parameter values and return valid set performances Parameters ---------- - l00~l19: int or float - placeholders for hyperparameters being optimized, - hyper_parameters dict is rebuilt based on input values of placeholders + placeholders: keyword arguments + Should be various hyperparameters as specified in `param_keys` above. 
Returns: -------- @@ -209,7 +220,7 @@ def f(**placeholders): model_dir = tempfile.mkdtemp() # Add it on to the information needed for the constructor hyper_parameters["model_dir"] = model_dir - model = self.model_class(**hyper_parameters) + model = self.model_builder(**hyper_parameters) model.fit(train_dataset) try: model.save() @@ -228,6 +239,7 @@ def f(**placeholders): f.write('\n') # Store all results all_results[hp_str] = score + model_locations[hp_str] = model_dir # GPGO maximize performance by default, set performance to its negative value for minimization if use_max: return score @@ -244,7 +256,7 @@ def f(**placeholders): cov = matern32() gp = GaussianProcess(cov) acq = Acquisition(mode='ExpectedImprovement') - gpgo = GPGO(gp, acq, f, param_range) + gpgo = GPGO(gp, acq, optimizing_function, param_range) logger.info("Max number of iteration: %i" % max_iter) gpgo.run(max_iter=max_iter) @@ -256,9 +268,17 @@ def f(**placeholders): else: hyper_parameters[hp] = float(hp_opt[hp]) hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) - model_dir = "model%s" % hp_str + + # Let's reinitialize the model with the best parameters + model_dir = model_locations[hp_str] hyper_parameters["model_dir"] = model_dir - best_model = self.model_class(**hyper_parameters) + best_model = self.model_builder(**hyper_parameters) + # Some models need to be explicitly reloaded + try: + best_model.restore() + # Some models auto reload + except NotImplementedError: + pass # Compare best model to default hyperparameters if log_file: diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index f0206f2a22..aa574fc7ff 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -24,6 +24,20 @@ class GridHyperparamOpt(HyperparamOpt): hyperparameter space. This implementation is simple and simply does a direct iteration over all possible hyperparameters and doesn't use parallelization to speed up the search. + + Example + ------- + This example shows the type of constructor function expected. + + >>> import sklearn + >>> import deepchem as dc + >>> def rf_model_builder(**model_params): + ... rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} + ... model_dir = model_params['model_dir'] + ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) + ... return dc.models.SklearnModel(sklearn_model, model_dir) + >>> optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) + """ def hyperparam_search(self, @@ -101,7 +115,7 @@ def hyperparam_search(self, else: model_dir = tempfile.mkdtemp() model_params['model_dir'] = model_dir - model = self.model_class(**model_params) + model = self.model_builder(**model_params) model.fit(train_dataset) try: model.save() diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 17c5586542..723d3cd1fe 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -5,6 +5,7 @@ import sklearn import deepchem as dc import unittest +import tempfile class TestGaussianHyperparamOpt(unittest.TestCase): @@ -12,7 +13,8 @@ class TestGaussianHyperparamOpt(unittest.TestCase): Test Gaussian Hyperparameter Optimization. 
""" - def test_rf_example(self): + def setUp(self): + """Set up common resources.""" def rf_model_builder(**model_params): rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} @@ -20,17 +22,53 @@ def rf_model_builder(**model_params): sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) - train_dataset = dc.data.NumpyDataset( + self.rf_model_builder = rf_model_builder + self.train_dataset = dc.data.NumpyDataset( X=np.random.rand(50, 5), y=np.random.rand(50, 1)) - valid_dataset = dc.data.NumpyDataset( + self.valid_dataset = dc.data.NumpyDataset( X=np.random.rand(20, 5), y=np.random.rand(20, 1)) - optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) + + def test_rf_example(self): + """Test a simple example of optimizing a RF model with a gaussian process.""" + + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) params_dict = {"n_estimators": 10} transformers = [ dc.trans.NormalizationTransformer( - transform_y=True, dataset=train_dataset) + transform_y=True, dataset=self.train_dataset) ] metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, train_dataset, valid_dataset, transformers, metric) + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + max_iter=2) + + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] > 0 + + def test_rf_with_logdir(self): + """Test that using a logdir can work correctly.""" + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": 10} + transformers = [ + dc.trans.NormalizationTransformer( + transform_y=True, dataset=self.train_dataset) + ] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + logdir=tmpdirname, + max_iter=2) + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] > 0 diff --git a/deepchem/models/models.py b/deepchem/models/models.py index 993d915054..b6f7df235f 100644 --- a/deepchem/models/models.py +++ b/deepchem/models/models.py @@ -77,7 +77,7 @@ def predict_on_batch(self, X, **kwargs): raise NotImplementedError( "Each model is responsible for its own predict_on_batch method.") - def reload(self): + def restore(self): """ Reload trained model from disk. """ diff --git a/deepchem/models/sklearn_models/__init__.py b/deepchem/models/sklearn_models/__init__.py index dfcbe28209..b5cf0a007c 100644 --- a/deepchem/models/sklearn_models/__init__.py +++ b/deepchem/models/sklearn_models/__init__.py @@ -92,7 +92,7 @@ def save(self): """Saves sklearn model to disk using joblib.""" save_to_disk(self.model_instance, self.get_model_filename(self.model_dir)) - def reload(self): + def restore(self): """Loads sklearn model from joblib file on disk.""" self.model_instance = load_from_disk( Model.get_model_filename(self.model_dir)) diff --git a/docs/hyper.rst b/docs/hyper.rst index 8809814741..bc5e2fdc67 100644 --- a/docs/hyper.rst +++ b/docs/hyper.rst @@ -12,7 +12,7 @@ DeepChem's hyperparameter optimzation algorithms are simple and run in single-threaded fashion. 
They are not intended to be production grade hyperparameter utilities, but rather useful first tools as you start exploring your parameter space. As the needs of your application grow, -we recommend swapping to a more hyeavy duty hyperparameter +we recommend swapping to a more heavy duty hyperparameter optimization library. Hyperparameter Optimization API diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py b/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py new file mode 100644 index 0000000000..1aa32f81f5 --- /dev/null +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py @@ -0,0 +1,32 @@ +import numpy as np +np.random.seed(123) +import tensorflow as tf +tf.random.set_seed(123) +import deepchem as dc +import sklearn +import logging +logging.basicConfig(level=logging.INFO) + +# Load delaney dataset +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +train, valid, test = delaney_datasets + +# Fit models +metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + +def rf_model_builder(**model_params): + rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} + model_dir = model_params['model_dir'] + sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) + return dc.models.SklearnModel(sklearn_model, model_dir) + + +params_dict = {"n_estimators": 30} +optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) +best_model, best_params, all_results = optimizer.hyperparam_search( + params_dict, train, valid, transformers, metric, logdir="/tmp") + +valid_score = best_model.evaluate(valid, [metric], transformers) +print("valid_score") +print(valid_score) diff --git a/scripts/install_deepchem_conda.ps1 b/scripts/install_deepchem_conda.ps1 index 8c3e6d57b2..7ae678fc6b 100644 --- a/scripts/install_deepchem_conda.ps1 +++ b/scripts/install_deepchem_conda.ps1 @@ -35,5 +35,5 @@ conda install -y -q -c deepchem -c rdkit -c conda-forge -c omnia ` pytest-cov ` flaky - +pip install pyGPGO pip install -U matminer tensorflow==2.2 tensorflow-probability==0.10 From b97910e40fee6cb3116fbbfa9b72cede319a2ce0 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Mon, 29 Jun 2020 19:14:08 -0700 Subject: [PATCH 15/23] Review --- deepchem/hyper/base_classes.py | 4 +- deepchem/hyper/gaussian_process.py | 13 +- deepchem/hyper/grid_search.py | 7 +- .../tests/test_gaussian_hyperparam_opt.py | 99 ++++++++- .../hyper/tests/test_grid_hyperparam_opt.py | 189 +++++++----------- 5 files changed, 176 insertions(+), 136 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 6618cb20b3..1d059fece2 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -3,7 +3,7 @@ class HyperparamOpt(object): This class is an abstract base class for hyperparameter search classes in DeepChem. Hyperparameter search is performed on - `dc.model.Model` classes. Each hyperparameter object accepts a + `dc.models.Model` classes. Each hyperparameter object accepts a `dc.models.Model` class upon construct. 
When the `hyperparam_search` class is invoked, this class is used to construct many different concrete models which are trained on the specified training set and @@ -75,7 +75,7 @@ def hyperparam_search(self, Returns ------- `(best_model, best_hyperparams, all_scores)` where `best_model` is - an instance of `dc.model.Models`, `best_hyperparams` is a + an instance of `dc.models.Models`, `best_hyperparams` is a dictionary of parameters, and `all_scores` is a dictionary mapping string representations of hyperparameter sets to validation scores. diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index c529a692a2..1f6a1a837c 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -92,12 +92,7 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): >>> import sklearn >>> import deepchem as dc - >>> def rf_model_builder(**model_params): - ... rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - ... model_dir = model_params['model_dir'] - ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) - ... return dc.models.SklearnModel(sklearn_model, model_dir) - >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) + >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) """ @@ -131,6 +126,7 @@ def hyperparam_search(self, metric: `dc.metrics.Metric` metric used for evaluation use_max: bool, (default True) + Specifies whether to maximize or minimize `metric`. maximization(True) or minimization(False) logdir: str, optional The directory in which to store created models. If not set, will @@ -228,8 +224,9 @@ def optimizing_function(**placeholders): except NotImplementedError: pass - evaluator = Evaluator(model, valid_dataset, transformers) - multitask_scores = evaluator.compute_model_performance([metric]) + #evaluator = Evaluator(model, valid_dataset, transformers) + #multitask_scores = evaluator.compute_model_performance([metric]) + multitask_scores = model.evaluate(valid_dataset, [metric]) score = multitask_scores[metric.name] if log_file: diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index aa574fc7ff..fe1b7c2687 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -31,12 +31,7 @@ class GridHyperparamOpt(HyperparamOpt): >>> import sklearn >>> import deepchem as dc - >>> def rf_model_builder(**model_params): - ... rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - ... model_dir = model_params['model_dir'] - ... sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) - ... 
return dc.models.SklearnModel(sklearn_model, model_dir) - >>> optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) + >>> optimizer = dc.hyper.GridHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) """ diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 723d3cd1fe..6e77994024 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -33,10 +33,7 @@ def test_rf_example(self): optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) params_dict = {"n_estimators": 10} - transformers = [ - dc.trans.NormalizationTransformer( - transform_y=True, dataset=self.train_dataset) - ] + transformers = [] metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( @@ -49,16 +46,36 @@ def test_rf_example(self): valid_score = best_model.evaluate(self.valid_dataset, [metric], transformers) + assert valid_score["pearson_r2_score"] == max(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 + + def test_rf_example_min(self): + """Test a simple example of optimizing a RF model with a gaussian process looking for minimum score.""" + + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": 10} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + use_max=False, + max_iter=2) + + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] == min(all_results.values()) assert valid_score["pearson_r2_score"] > 0 def test_rf_with_logdir(self): """Test that using a logdir can work correctly.""" optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) params_dict = {"n_estimators": 10} - transformers = [ - dc.trans.NormalizationTransformer( - transform_y=True, dataset=self.train_dataset) - ] + transformers = [] metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) with tempfile.TemporaryDirectory() as tmpdirname: best_model, best_hyperparams, all_results = optimizer.hyperparam_search( @@ -71,4 +88,70 @@ def test_rf_with_logdir(self): max_iter=2) valid_score = best_model.evaluate(self.valid_dataset, [metric], transformers) + assert valid_score["pearson_r2_score"] == max(all_results.values()) assert valid_score["pearson_r2_score"] > 0 + + def test_regression_overfit(self): + """Test that MultitaskRegressor can overfit simple regression datasets.""" + n_samples = 10 + n_features = 3 + n_tasks = 1 + + # Generate dummy dataset + np.random.seed(123) + ids = np.arange(n_samples) + X = np.random.rand(n_samples, n_features) + y = np.zeros((n_samples, n_tasks)) + w = np.ones((n_samples, n_tasks)) + dataset = dc.data.NumpyDataset(X, y, w, ids) + + regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error) + # TODO(rbharath): This breaks with optimizer="momentum". Why? 
+ model = dc.models.MultitaskRegressor( + n_tasks, + n_features, + dropouts=[0.], + weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], + batch_size=n_samples, + learning_rate=0.003) + + # Fit trained model + model.fit(dataset, nb_epoch=100) + + # Eval model on train + scores = model.evaluate(dataset, [regression_metric]) + assert scores[regression_metric.name] < .1 + + def test_multitask_example(self): + """Test a simple example of optimizing a multitask model with a grid search.""" + # Generate dummy dataset + np.random.seed(123) + train_dataset = dc.data.NumpyDataset( + np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), + np.arange(10)) + valid_dataset = dc.data.NumpyDataset( + np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) + + optimizer = dc.hyper.GaussianProcessHyperparamOpt( + lambda **p: dc.models.MultitaskRegressor(n_tasks=2, + n_features=3, dropouts=[0.], + weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)], + learning_rate=0.003, **p)) + + params_dict = {"batch_size": 10} + transformers = [] + metric = dc.metrics.Metric( + dc.metrics.mean_squared_error, task_averager=np.mean) + + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + max_iter=2, + use_max=False) + + valid_score = best_model.evaluate(valid_dataset, [metric]) + assert valid_score["mean-mean_squared_error"] == min(all_results.values()) + assert valid_score["mean-mean_squared_error"] > 0 diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py index c95af0219d..362eb5cc7c 100644 --- a/deepchem/hyper/tests/test_grid_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -18,140 +18,105 @@ class TestGridHyperparamOpt(unittest.TestCase): Test grid hyperparameter optimization API. 
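The grid optimizer exercised by these tests enumerates every combination of the candidate values supplied in `params_dict`. As a rough sketch of that enumeration (the parameter names and values below are assumed purely for illustration, not taken from the tests):

    import itertools

    # Assumed example grid; any dict mapping names to lists behaves the same way.
    params_dict = {"n_estimators": [10, 100], "max_depth": [4, 8]}
    hyperparams = list(params_dict.keys())
    hyperparam_vals = [params_dict[hp] for hp in hyperparams]
    for hyperparameter_tuple in itertools.product(*hyperparam_vals):
        # Each tuple is zipped back into a keyword dict for the model builder.
        model_params = dict(zip(hyperparams, hyperparameter_tuple))
        print(model_params)
    # -> {'n_estimators': 10, 'max_depth': 4}, {'n_estimators': 10, 'max_depth': 8}, ...

Each such dictionary is handed to the model builder, and the resulting model is scored on the validation set.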
""" - def test_rf_hyperparam(self): - """Test of hyperparam_opt with singletask RF ECFP regression API.""" - featurizer = dc.feat.CircularFingerprint(size=1024) - tasks = ["log-solubility"] - current_dir = os.path.dirname(os.path.abspath(__file__)) - input_file = os.path.join(current_dir, "../../models/tests/example.csv") - loader = dc.data.CSVLoader( - tasks=tasks, smiles_field="smiles", featurizer=featurizer) - dataset = loader.featurize(input_file) - - splitter = dc.splits.ScaffoldSplitter() - train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( - dataset) - - transformers = [ - dc.trans.NormalizationTransformer( - transform_y=True, dataset=train_dataset) - ] - for dataset in [train_dataset, test_dataset]: - for transformer in transformers: - dataset = transformer.transform(dataset) - - params_dict = {"n_estimators": [10, 100]} - metric = dc.metrics.Metric(dc.metrics.r2_score) + def setUp(self): + """Set up common resources.""" def rf_model_builder(**model_params): rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} model_dir = model_params['model_dir'] - sklearn_model = RandomForestRegressor(**rf_params) + sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) return dc.models.SklearnModel(sklearn_model, model_dir) - optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric, - logdir=None) - - def test_multitask_rf_hyperparam_opt(self): - """Test of hyperparam_opt with singletask_to_multitask.""" - tasks = [ - "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", - "task8", "task9", "task10", "task11", "task12", "task13", "task14", - "task15", "task16" - ] - input_file = "multitask_example.csv" - - n_features = 10 - n_tasks = len(tasks) - # Define train dataset - n_train = 100 - X_train = np.random.rand(n_train, n_features) - y_train = np.random.randint(2, size=(n_train, n_tasks)) - w_train = np.ones_like(y_train) - ids_train = ["C"] * n_train - - train_dataset = dc.data.DiskDataset.from_numpy(X_train, y_train, w_train, - ids_train, tasks) - - # Define validation dataset - n_valid = 10 - X_valid = np.random.rand(n_valid, n_features) - y_valid = np.random.randint(2, size=(n_valid, n_tasks)) - w_valid = np.ones_like(y_valid) - ids_valid = ["C"] * n_valid - valid_dataset = dc.data.DiskDataset.from_numpy(X_valid, y_valid, w_valid, - ids_valid, tasks) + self.rf_model_builder = rf_model_builder + self.train_dataset = dc.data.NumpyDataset( + X=np.random.rand(50, 5), y=np.random.rand(50, 1)) + self.valid_dataset = dc.data.NumpyDataset( + X=np.random.rand(20, 5), y=np.random.rand(20, 1)) + def test_rf_hyperparam(self): + """Test of hyperparam_opt with singletask RF ECFP regression API.""" + optimizer = dc.hyper.GridHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": [10, 100]} transformers = [] - classification_metric = dc.metrics.Metric( - dc.metrics.matthews_corrcoef, np.mean, mode="classification") - params_dict = {"n_estimators": [1, 10]} + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - def multitask_model_builder(**model_params): - rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - model_dir = model_params['model_dir'] + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, self.train_dataset, self.valid_dataset, transformers, + metric) + valid_score = 
best_model.evaluate(self.valid_dataset, [metric], + transformers) - def model_builder(model_dir): - sklearn_model = RandomForestClassifier(**rf_params) - return dc.models.SklearnModel(sklearn_model, model_dir) + assert valid_score["pearson_r2_score"] == max(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 - return dc.models.SingletaskToMultitask(tasks, model_builder, model_dir) + def test_rf_hyperparam_min(self): + """Test of hyperparam_opt with singletask RF ECFP regression API.""" + optimizer = dc.hyper.GridHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": [10, 100]} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - optimizer = dc.hyper.GridHyperparamOpt(multitask_model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, - train_dataset, - valid_dataset, + self.train_dataset, + self.valid_dataset, transformers, - classification_metric, - logdir=None) - - def test_mlp_hyperparam_opt(self): - """Straightforward test of Tensorflow multitask deepchem classification API.""" - current_dir = os.path.dirname(os.path.abspath(__file__)) - input_file = os.path.join(current_dir, - "../../models/tests/multitask_example.csv") - tasks = [ - "task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7", - "task8", "task9", "task10", "task11", "task12", "task13", "task14", - "task15", "task16" - ] - - n_features = 1024 - featurizer = dc.feat.CircularFingerprint(size=n_features) - - loader = dc.data.CSVLoader( - tasks=tasks, smiles_field="smiles", featurizer=featurizer) - dataset = loader.featurize(input_file) - - splitter = dc.splits.ScaffoldSplitter() - train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split( - dataset) + metric, + use_max=False) + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + + assert valid_score["pearson_r2_score"] == min(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 + def test_rf_with_logdir(self): + """Test that using a logdir can work correctly.""" + optimizer = dc.hyper.GridHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": [10, 5]} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + logdir=tmpdirname) + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] == max(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 + + def test_multitask_example(self): + """Test a simple example of optimizing a multitask model with a grid search.""" + # Generate dummy dataset + np.random.seed(123) + train_dataset = dc.data.NumpyDataset( + np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), + np.arange(10)) + valid_dataset = dc.data.NumpyDataset( + np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) + + optimizer = dc.hyper.GridHyperparamOpt( + lambda **p: dc.models.MultitaskRegressor(n_tasks=2, + n_features=3, dropouts=[0.], + weight_init_stddevs=[np.sqrt(6)/np.sqrt(1000)], + learning_rate=0.003, **p)) + + params_dict = {"batch_size": [10, 20]} transformers = [] metric = dc.metrics.Metric( - dc.metrics.roc_auc_score, np.mean, mode="classification") - params_dict = {"layer_sizes": [(10,), (100,)]} + dc.metrics.mean_squared_error, 
task_averager=np.mean) - def model_builder(**model_params): - model_dir = model_params['model_dir'] - multitask_params = { - k: v for (k, v) in model_params.items() if k != 'model_dir' - } - return dc.models.MultitaskClassifier( - len(tasks), n_features, model_dir=model_dir, **multitask_params) - - optimizer = dc.hyper.GridHyperparamOpt(model_builder) best_model, best_hyperparams, all_results = optimizer.hyperparam_search( params_dict, train_dataset, valid_dataset, transformers, metric, - logdir=None) + use_max=False) + + valid_score = best_model.evaluate(valid_dataset, [metric]) + assert valid_score["mean-mean_squared_error"] == min(all_results.values()) + assert valid_score["mean-mean_squared_error"] > 0 From fe7bdb7b5ff108ffc0e6a30a5102a8856e9a2ae2 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 30 Jun 2020 14:05:22 -0700 Subject: [PATCH 16/23] Changes --- deepchem/hyper/base_classes.py | 8 +++++-- deepchem/hyper/gaussian_process.py | 36 ++++++++++++++++++++++++------ 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 1d059fece2..1e95b91520 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -53,9 +53,13 @@ def hyperparam_search(self, Parameters ---------- params_dict: dict - Dictionary mapping strings to Ints/Floats/Lists. Note that the + Dictionary mapping strings to values. Note that the precise semantics of `params_dict` will change depending on the - optimizer that you're using. + optimizer that you're using. Depending on the type of + hyperparameter optimization, these values can be + ints/floats/strings/lists/etc. Read the documentation for the + concrete hyperparameter optimization subclass you're using to + learn more about what's expected. train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 1f6a1a837c..57bfd59527 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -18,7 +18,7 @@ def _convert_hyperparam_dict_to_filename(hyper_params): Parameters ---------- hyper_params: dict - Maps string of hyperparameter name to int/float/list. + Maps string of hyperparameter name to int/float. Returns ------- @@ -32,8 +32,12 @@ def _convert_hyperparam_dict_to_filename(hyper_params): value = hyper_params[key] if isinstance(value, int): filename += "_%s" % str(value) - else: + elif isinstance(value, float): filename += "_%.2f" % value + else: + raise ValueError( + "Hyperparameters to search must be specified as ints/floats since GaussianProcessHyperparamOpt searches over a range of numbers around the specified point." + ) return filename @@ -43,9 +47,10 @@ def compute_parameter_range(params_dict, search_range): Parameters ---------- params_dict: dict - Dictionary mapping strings to Ints/Floats/Lists. For those - parameters in which int/float is specified, an explicit list of - parameters is computed with `search_range`. + Dictionary mapping strings to Ints/Floats. An explicit list of + parameters is computed with `search_range`. The optimization range + computed is specified in the documentation for `search_range` + below. search_range: int(float) (default 4) For int/float values in `params_dict`, computes optimization range on `[initial values / search_range, initial values * @@ -57,7 +62,9 @@ def compute_parameter_range(params_dict, search_range): Dictionary mapping hyperparameter names to tuples. 
Each tuple is of form `(value_type, value_range)` where `value_type` is a string that is either "int" or "cont" and `value_range` is a list of two - elements of the form `[low, hi]` + elements of the form `[low, hi]`. This format is expected by + pyGPGO which `GaussianProcessHyperparamOpt` uses to perform + optimization. """ # Range of optimization param_range = {} @@ -94,6 +101,20 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): >>> import deepchem as dc >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) + Here's a more sophisticated example that shows how to optimize only + some parameters of a model + + >>> def model_builder(**model_params): + ... n_layers = model_params['layers'] + ... layer_width = model_params['width'] + ... dropout = model_params['dropout'] + ... return dc.models.MultitaskClassifier( + ... n_tasks=5, + ... n_features=100, + ... layer_sizes=[layer_width]*n_layers, + ... dropouts=dropout + ... ) + >> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder) """ def hyperparam_search(self, @@ -116,7 +137,8 @@ def hyperparam_search(self, values. The semantics of this list are different than for `GridHyperparamOpt`. `params_dict[hp]` must map to an int/float, which is used as the center of a search with radius - `search_range`. + `search_range` since pyGPGO can only optimize numerical + hyperparameters. train_dataset: `dc.data.Dataset` dataset used for training valid_dataset: `dc.data.Dataset` From 5ecedefb5267edee68c9b501e6684b8f289f4a75 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 30 Jun 2020 14:32:49 -0700 Subject: [PATCH 17/23] Fixing comments --- deepchem/hyper/gaussian_process.py | 70 ++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 19 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 57bfd59527..babf8ca554 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -51,10 +51,24 @@ def compute_parameter_range(params_dict, search_range): parameters is computed with `search_range`. The optimization range computed is specified in the documentation for `search_range` below. - search_range: int(float) (default 4) - For int/float values in `params_dict`, computes optimization range - on `[initial values / search_range, initial values * - search_range]` + search_range: int(float)/dict (default 4) + The `search_range` specifies the range of parameter values to + search for. If `search_range` is an int/float, it is used as the + global search range for parameters. This creates a search + problem on the following space: + + optimization on [initial value / search_range, + initial value * search_range] + + If `search_range` is a dict, it must contain the same keys as + for `params_dict`. In this case, `search_range` specifies a + per-parameter search range. This is useful in case some + parameters have a larger natural range than others. For a given + hyperparameter `hp` this would create the following search + range: + + optimization on hp on [initial value[hp] / search_range[hp], + initial value[hp] * search_range[hp]] Returns ------- @@ -102,19 +116,23 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) Here's a more sophisticated example that shows how to optimize only - some parameters of a model + some parameters of a model. 
In this case, we have some parameters we + want to optimize, and others which we don't. To handle this type of + search, we create a `model_builder` which hard codes some arguments + (in this case, `n_tasks` and `n_features` which are properties of a + dataset and not hyperparameters to search over.) >>> def model_builder(**model_params): - ... n_layers = model_params['layers'] - ... layer_width = model_params['width'] - ... dropout = model_params['dropout'] - ... return dc.models.MultitaskClassifier( - ... n_tasks=5, - ... n_features=100, - ... layer_sizes=[layer_width]*n_layers, - ... dropouts=dropout - ... ) - >> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder) + ... n_layers = model_params['layers'] + ... layer_width = model_params['width'] + ... dropout = model_params['dropout'] + ... return dc.models.MultitaskClassifier( + ... n_tasks=5, + ... n_features=100, + ... layer_sizes=[layer_width]*n_layers, + ... dropouts=dropout + ... ) + >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(model_builder) """ def hyperparam_search(self, @@ -155,10 +173,24 @@ def hyperparam_search(self, use a temporary directory. max_iter: int, (default 20) number of optimization trials - search_range: int(float) (default 4) - optimization on [initial values / search_range, - initial values * search_range] - names of parameters that should not be optimized + search_range: int(float)/dict (default 4) + The `search_range` specifies the range of parameter values to + search for. If `search_range` is an int/float, it is used as the + global search range for parameters. This creates a search + problem on the following space: + + optimization on [initial value / search_range, + initial value * search_range] + + If `search_range` is a dict, it must contain the same keys as + for `params_dict`. In this case, `search_range` specifies a + per-parameter search range. This is useful in case some + parameters have a larger natural range than others. For a given + hyperparameter `hp` this would create the following search + range: + + optimization on hp on [initial value[hp] / search_range[hp], + initial value[hp] * search_range[hp]] logfile: str Name of logfile to write results to. If specified, this is must be a valid file. If not specified, results of hyperparameter From ecd815fd3e34ed9930ac6c6f1fdc320d5c6303fc Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 30 Jun 2020 19:14:42 -0700 Subject: [PATCH 18/23] Changes --- deepchem/hyper/base_classes.py | 32 +++++++++++ deepchem/hyper/gaussian_process.py | 56 ++++++++----------- deepchem/hyper/grid_search.py | 6 +- .../tests/test_gaussian_hyperparam_opt.py | 56 ++++++++++++++++++- .../hyper/tests/test_grid_hyperparam_opt.py | 44 +++++++++++++++ 5 files changed, 158 insertions(+), 36 deletions(-) diff --git a/deepchem/hyper/base_classes.py b/deepchem/hyper/base_classes.py index 1e95b91520..5b9060b28c 100644 --- a/deepchem/hyper/base_classes.py +++ b/deepchem/hyper/base_classes.py @@ -1,3 +1,35 @@ +import logging + +logger = logging.getLogger(__name__) + + +def _convert_hyperparam_dict_to_filename(hyper_params): + """Helper function that converts a dictionary of hyperparameters to a string that can be a filename. + + Parameters + ---------- + hyper_params: dict + Maps string of hyperparameter name to int/float. + + Returns + ------- + filename: str + A filename of form "_key1_value1_value2_..._key2..." 
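To make the filename helper above and the `compute_parameter_range` helper in `gaussian_process.py` concrete, here is an illustrative sketch of the values they are intended to produce (parameter names and numbers are assumed, chosen only for illustration):

    >>> _convert_hyperparam_dict_to_filename({"batch_size": 10, "learning_rate": 0.01})
    '_batch_size_10_learning_rate_0.01'
    >>> compute_parameter_range({"n_estimators": 10}, 4)
    {'n_estimators': ('int', [2, 40])}
    >>> compute_parameter_range({"learning_rate": 0.01}, 10)
    {'learning_rate': ('cont', [0.001, 0.1])}

Keys are emitted in sorted order, integers keep their exact value in the filename, and floats are formatted with two decimal places via "%.2f".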
+ """ + filename = "" + keys = sorted(hyper_params.keys()) + for key in keys: + filename += "_%s" % str(key) + value = hyper_params[key] + if isinstance(value, int): + filename += "_%s" % str(value) + elif isinstance(value, float): + filename += "_%.2f" % value + else: + filename += "%s" % str(value) + return filename + + class HyperparamOpt(object): """Abstract superclass for hyperparameter search classes. diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index babf8ca554..03aaadc92d 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -8,39 +8,11 @@ import deepchem from deepchem.hyper.base_classes import HyperparamOpt from deepchem.utils.evaluate import Evaluator +from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename logger = logging.getLogger(__name__) -def _convert_hyperparam_dict_to_filename(hyper_params): - """Helper function that converts a dictionary of hyperparameters to a string that can be a filename. - - Parameters - ---------- - hyper_params: dict - Maps string of hyperparameter name to int/float. - - Returns - ------- - filename: str - A filename of form "_key1_value1_value2_..._key2..." - """ - filename = "" - keys = sorted(hyper_params.keys()) - for key in keys: - filename += "_%s" % str(key) - value = hyper_params[key] - if isinstance(value, int): - filename += "_%s" % str(value) - elif isinstance(value, float): - filename += "_%.2f" % value - else: - raise ValueError( - "Hyperparameters to search must be specified as ints/floats since GaussianProcessHyperparamOpt searches over a range of numbers around the specified point." - ) - return filename - - def compute_parameter_range(params_dict, search_range): """Convenience Function to compute parameter search space. @@ -82,16 +54,27 @@ def compute_parameter_range(params_dict, search_range): """ # Range of optimization param_range = {} + if isinstance(search_range, dict): + if sorted(params_dict.keys()) != sorted(search_range.keys()): + raise ValueError( + "If search_range is provided as a dictionary, it must have the same keys as params_dict." 
+ ) + elif (not isinstance(search_range, int)) and (not isinstance( + search_range, float)): + raise ValueError("search_range must be a dict or int or float.") for hp, value in params_dict.items(): + if isinstance(search_range, dict): + hp_search_range = search_range[hp] + else: + # We know from guard above that this is an int/float + hp_search_range = search_range if isinstance(value, int): - value_range = [value // search_range, value * search_range] + value_range = [value // hp_search_range, value * hp_search_range] param_range[hp] = ("int", value_range) - pass elif isinstance(value, float): - value_range = [value / search_range, value * search_range] + value_range = [value / hp_search_range, value * hp_search_range] param_range[hp] = ("cont", value_range) - pass - return param_range + return param_range class GaussianProcessHyperparamOpt(HyperparamOpt): @@ -239,6 +222,10 @@ def optimizing_function(**placeholders): valid_scores: float valid set performances """ + ############################ + print("placeholders: %s" % str(placeholders)) + print("param_range: %s" % str(param_range)) + ############################ hyper_parameters = {} for hp in param_keys: if param_range[hp][0] == "int": @@ -335,6 +322,7 @@ def optimizing_function(**placeholders): if log_file: with open(log_file, 'a') as f: # Record hyperparameters + f.write("params_dict:") f.write(str(params_dict)) f.write('\n') diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index fe1b7c2687..1366c52604 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -12,6 +12,7 @@ from operator import mul from deepchem.utils.evaluate import Evaluator from deepchem.hyper.base_classes import HyperparamOpt +from deepchem.hyper.base_classes import _convert_hyperparam_dict_to_filename logger = logging.getLogger(__name__) @@ -94,6 +95,8 @@ def hyperparam_search(self, itertools.product(*hyperparam_vals)): model_params = {} logger.info("Fitting model %d/%d" % (ind + 1, number_combinations)) + # Construction dictionary mapping hyperparameter names to values + hyper_params = dict(zip(hyperparams, hyperparameter_tuple)) for hyperparam, hyperparam_val in zip(hyperparams, hyperparameter_tuple): model_params[hyperparam] = hyperparam_val logger.info("hyperparameters: %s" % str(model_params)) @@ -121,7 +124,8 @@ def hyperparam_search(self, evaluator = Evaluator(model, valid_dataset, output_transformers) multitask_scores = evaluator.compute_model_performance([metric]) valid_score = multitask_scores[metric.name] - all_scores[str(hyperparameter_tuple)] = valid_score + hp_str = _convert_hyperparam_dict_to_filename(hyper_params) + all_scores[hp_str] = valid_score if (use_max and valid_score >= best_validation_score) or ( not use_max and valid_score <= best_validation_score): diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 6e77994024..aee10eb108 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -1,11 +1,17 @@ """ Tests for Gaussian Process Hyperparameter Optimization. + +These tests fails every so often. I think it's when the Gaussian +process optimizer doesn't find an optimal point. This is still a +valuable test suite so leaving it in despite the flakiness. 
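Because both `GridHyperparamOpt` and `GaussianProcessHyperparamOpt` now key the returned `all_results` dictionary by strings derived from `_convert_hyperparam_dict_to_filename`, the outcome of a finished search can be inspected directly from that dictionary. A minimal sketch, assuming a completed run and a metric where higher is better (the scores below are made up):

    # Hypothetical contents of the `all_results` dict returned by hyperparam_search.
    all_results = {"_batch_size_10_learning_rate_0.01": 0.61,
                   "_batch_size_20_learning_rate_0.10": 0.55}
    best_key = max(all_results, key=all_results.get)  # use min(...) when use_max=False
    print(best_key, all_results[best_key])

This mirrors the assertions in the tests, which compare the best model's validation score against `max(all_results.values())` (or `min(...)` when minimizing).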
""" +import os import numpy as np import sklearn import deepchem as dc import unittest import tempfile +from flaky import flaky class TestGaussianHyperparamOpt(unittest.TestCase): @@ -122,8 +128,9 @@ def test_regression_overfit(self): scores = model.evaluate(dataset, [regression_metric]) assert scores[regression_metric.name] < .1 + @flaky def test_multitask_example(self): - """Test a simple example of optimizing a multitask model with a grid search.""" + """Test a simple example of optimizing a multitask model with a gaussian process search.""" # Generate dummy dataset np.random.seed(123) train_dataset = dc.data.NumpyDataset( @@ -155,3 +162,50 @@ def test_multitask_example(self): valid_score = best_model.evaluate(valid_dataset, [metric]) assert valid_score["mean-mean_squared_error"] == min(all_results.values()) assert valid_score["mean-mean_squared_error"] > 0 + + @flaky + def test_multitask_example_different_search_range(self): + """Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range.""" + # Generate dummy dataset + np.random.seed(123) + train_dataset = dc.data.NumpyDataset( + np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), + np.arange(10)) + valid_dataset = dc.data.NumpyDataset( + np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) + + optimizer = dc.hyper.GaussianProcessHyperparamOpt( + lambda **p: dc.models.MultitaskRegressor( + n_tasks=2, + n_features=3, + dropouts=[0.], + weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], + #learning_rate=0.003, **p)) + **p)) + + params_dict = {"learning_rate": 0.003, "batch_size": 10} + # These are per-example multiplier + search_range = {"learning_rate": 10, "batch_size": 4} + transformers = [] + metric = dc.metrics.Metric( + dc.metrics.mean_squared_error, task_averager=np.mean) + + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + max_iter=2, + logdir=tmpdirname, + search_range=search_range, + use_max=False) + valid_score = best_model.evaluate(valid_dataset, [metric]) + # Test that 2 parameters were optimized + for hp_str in all_results.keys(): + # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example + assert "batch_size" in hp_str + assert "learning_rate" in hp_str + assert valid_score["mean-mean_squared_error"] == min(all_results.values()) + assert valid_score["mean-mean_squared_error"] > 0 diff --git a/deepchem/hyper/tests/test_grid_hyperparam_opt.py b/deepchem/hyper/tests/test_grid_hyperparam_opt.py index 362eb5cc7c..3f0c5899f5 100644 --- a/deepchem/hyper/tests/test_grid_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_grid_hyperparam_opt.py @@ -120,3 +120,47 @@ def test_multitask_example(self): valid_score = best_model.evaluate(valid_dataset, [metric]) assert valid_score["mean-mean_squared_error"] == min(all_results.values()) assert valid_score["mean-mean_squared_error"] > 0 + + def test_multitask_example_multiple_params(self): + """Test a simple example of optimizing a multitask model with a grid search with multiple parameters to optimize.""" + # Generate dummy dataset + np.random.seed(123) + train_dataset = dc.data.NumpyDataset( + np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), + np.arange(10)) + valid_dataset = dc.data.NumpyDataset( + np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) + + optimizer = dc.hyper.GridHyperparamOpt( 
+ lambda **p: dc.models.MultitaskRegressor( + n_tasks=2, + n_features=3, + dropouts=[0.], + weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], + #learning_rate=0.003, **p)) + **p)) + + params_dict = {"learning_rate": [0.003, 0.03], "batch_size": [10, 50]} + # These are per-example multiplier + transformers = [] + metric = dc.metrics.Metric( + dc.metrics.mean_squared_error, task_averager=np.mean) + + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + logdir=tmpdirname, + use_max=False) + valid_score = best_model.evaluate(valid_dataset, [metric]) + # Test that 2 parameters were optimized + for hp_str in all_results.keys(): + # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example + assert "batch_size" in hp_str + assert "learning_rate" in hp_str + + assert valid_score["mean-mean_squared_error"] == min(all_results.values()) + assert valid_score["mean-mean_squared_error"] > 0 From b880f6bcd02eaa7a6bc7a80b9f7f730524c3389e Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Tue, 30 Jun 2020 20:02:11 -0700 Subject: [PATCH 19/23] Changes --- deepchem/hyper/gaussian_process.py | 6 +--- deepchem/hyper/grid_search.py | 19 ++++++++++++ .../tests/test_gaussian_hyperparam_opt.py | 31 ------------------- .../hyperparam_opt/gaussian_hyperparam_opt.py | 23 +++++++------- .../gaussian_hyperparam_opt_with_logdir.py | 19 +++++------- .../hyperparam_opt/grid_hyperparam_opt.py | 22 +++++++------ 6 files changed, 50 insertions(+), 70 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 03aaadc92d..a668a51bff 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -96,7 +96,7 @@ class GaussianProcessHyperparamOpt(HyperparamOpt): >>> import sklearn >>> import deepchem as dc - >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) + >>> optimizer = dc.hyper.GaussianProcessHyperparamOpt(lambda **p: dc.models.GraphConvModel(n_tasks=1, **p)) Here's a more sophisticated example that shows how to optimize only some parameters of a model. In this case, we have some parameters we @@ -222,10 +222,6 @@ def optimizing_function(**placeholders): valid_scores: float valid set performances """ - ############################ - print("placeholders: %s" % str(placeholders)) - print("param_range: %s" % str(param_range)) - ############################ hyper_parameters = {} for hp in param_keys: if param_range[hp][0] == "int": diff --git a/deepchem/hyper/grid_search.py b/deepchem/hyper/grid_search.py index 1366c52604..151d944aec 100644 --- a/deepchem/hyper/grid_search.py +++ b/deepchem/hyper/grid_search.py @@ -34,6 +34,25 @@ class GridHyperparamOpt(HyperparamOpt): >>> import deepchem as dc >>> optimizer = dc.hyper.GridHyperparamOpt(lambda **p: dc.models.GraphConvModel(**p)) + Here's a more sophisticated example that shows how to optimize only + some parameters of a model. In this case, we have some parameters we + want to optimize, and others which we don't. To handle this type of + search, we create a `model_builder` which hard codes some arguments + (in this case, `n_tasks` and `n_features` which are properties of a + dataset and not hyperparameters to search over.) + + >>> def model_builder(**model_params): + ... n_layers = model_params['layers'] + ... layer_width = model_params['width'] + ... 
dropout = model_params['dropout'] + ... return dc.models.MultitaskClassifier( + ... n_tasks=5, + ... n_features=100, + ... layer_sizes=[layer_width]*n_layers, + ... dropouts=dropout + ... ) + >>> optimizer = dc.hyper.GridHyperparamOpt(model_builder) + """ def hyperparam_search(self, diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index aee10eb108..56812ef0eb 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -97,37 +97,6 @@ def test_rf_with_logdir(self): assert valid_score["pearson_r2_score"] == max(all_results.values()) assert valid_score["pearson_r2_score"] > 0 - def test_regression_overfit(self): - """Test that MultitaskRegressor can overfit simple regression datasets.""" - n_samples = 10 - n_features = 3 - n_tasks = 1 - - # Generate dummy dataset - np.random.seed(123) - ids = np.arange(n_samples) - X = np.random.rand(n_samples, n_features) - y = np.zeros((n_samples, n_tasks)) - w = np.ones((n_samples, n_tasks)) - dataset = dc.data.NumpyDataset(X, y, w, ids) - - regression_metric = dc.metrics.Metric(dc.metrics.mean_squared_error) - # TODO(rbharath): This breaks with optimizer="momentum". Why? - model = dc.models.MultitaskRegressor( - n_tasks, - n_features, - dropouts=[0.], - weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], - batch_size=n_samples, - learning_rate=0.003) - - # Fit trained model - model.fit(dataset, nb_epoch=100) - - # Eval model on train - scores = model.evaluate(dataset, [regression_metric]) - assert scores[regression_metric.name] < .1 - @flaky def test_multitask_example(self): """Test a simple example of optimizing a multitask model with a gaussian process search.""" diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py index 0b55e8493e..1fc654c04f 100644 --- a/examples/hyperparam_opt/gaussian_hyperparam_opt.py +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -6,21 +6,20 @@ import sklearn # Load delaney dataset -delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney( + featurizer="GraphConv") train, valid, test = delaney_datasets # Fit models metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +optimizer = dc.hyper.GaussianProcessHyperparamOpt( + lambda **p: dc.models.GraphConvModel( + n_tasks=len(delaney_tasks), mode="regression", **p)) - -def rf_model_builder(**model_params): - rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - model_dir = model_params['model_dir'] - sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) - return dc.models.SklearnModel(sklearn_model, model_dir) - - -params_dict = {"n_estimators": 30} -optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) +params_dict = {"dropout": 0.5} best_model, best_params, all_results = optimizer.hyperparam_search( - params_dict, train, valid, transformers, metric) + params_dict, train, valid, transformers, metric, max_iter=2, search_range=2) + +valid_score = best_model.evaluate(valid, [metric], transformers) +print("valid_score") +print(valid_score) diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py b/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py index 1aa32f81f5..c9579dfe61 100644 --- a/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py +++ 
b/examples/hyperparam_opt/gaussian_hyperparam_opt_with_logdir.py @@ -8,24 +8,19 @@ logging.basicConfig(level=logging.INFO) # Load delaney dataset -delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney( + featurizer="GraphConv") train, valid, test = delaney_datasets # Fit models metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +optimizer = dc.hyper.GaussianProcessHyperparamOpt( + lambda **p: dc.models.GraphConvModel( + n_tasks=len(delaney_tasks), mode="regression", **p)) - -def rf_model_builder(**model_params): - rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - model_dir = model_params['model_dir'] - sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) - return dc.models.SklearnModel(sklearn_model, model_dir) - - -params_dict = {"n_estimators": 30} -optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder) +params_dict = {"dropout": 0.5} best_model, best_params, all_results = optimizer.hyperparam_search( - params_dict, train, valid, transformers, metric, logdir="/tmp") + params_dict, train, valid, transformers, metric, max_iter=2, search_range=2) valid_score = best_model.evaluate(valid, [metric], transformers) print("valid_score") diff --git a/examples/hyperparam_opt/grid_hyperparam_opt.py b/examples/hyperparam_opt/grid_hyperparam_opt.py index ae0286a7e4..c427c81b6c 100644 --- a/examples/hyperparam_opt/grid_hyperparam_opt.py +++ b/examples/hyperparam_opt/grid_hyperparam_opt.py @@ -6,21 +6,23 @@ import sklearn # Load delaney dataset -delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney() +delaney_tasks, delaney_datasets, transformers = dc.molnet.load_delaney( + featurizer="GraphConv") train, valid, test = delaney_datasets # Fit models metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +# Fit models +metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +optimizer = dc.hyper.GridHyperparamOpt( + lambda **p: dc.models.GraphConvModel( + n_tasks=len(delaney_tasks), mode="regression", **p)) -def rf_model_builder(**model_params): - rf_params = {k: v for (k, v) in model_params.items() if k != 'model_dir'} - model_dir = model_params['model_dir'] - sklearn_model = sklearn.ensemble.RandomForestRegressor(**rf_params) - return dc.models.SklearnModel(sklearn_model, model_dir) - - -params_dict = {"n_estimators": [10, 30, 50, 100]} -optimizer = dc.hyper.GridHyperparamOpt(rf_model_builder) +params_dict = {"dropout": [0.1, 0.5]} best_model, best_params, all_results = optimizer.hyperparam_search( params_dict, train, valid, transformers, metric) + +valid_score = best_model.evaluate(valid, [metric], transformers) +print("valid_score") +print(valid_score) From acb0c8c705c1c9156bcbba45acacdd83e192dcc3 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Thu, 2 Jul 2020 12:26:38 -0700 Subject: [PATCH 20/23] changes --- deepchem/hyper/gaussian_process.py | 22 +- .../tests/test_gaussian_hyperparam_opt.py | 217 +++++++++--------- .../hyperparam_opt/gaussian_hyperparam_opt.py | 2 +- 3 files changed, 126 insertions(+), 115 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index a668a51bff..4bc79e01f3 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -192,6 +192,9 @@ def hyperparam_search(self, if logfile: log_file = logfile elif logdir is not None: + # Make logdir if it doesn't exist. 
+ if not os.path.exists(logdir): + os.makedirs(logdir, exist_ok=True) log_file = os.path.join(logdir, "results.txt") else: log_file = None @@ -232,10 +235,9 @@ def optimizing_function(**placeholders): hyper_parameters[hp] = float(placeholders[hp]) logger.info("Running hyperparameter set: %s" % str(hyper_parameters)) if log_file: - # Run benchmark - with open(log_file, 'a') as f: + with open(log_file, 'w+') as f: # Record hyperparameters - f.write(str(hyper_parameters)) + f.write("Parameters: %s" % str(hyper_parameters)) f.write('\n') hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) @@ -253,23 +255,28 @@ def optimizing_function(**placeholders): model_dir = tempfile.mkdtemp() # Add it on to the information needed for the constructor hyper_parameters["model_dir"] = model_dir + ########################################## + print("hyper_parameters") + print(hyper_parameters) + ########################################## model = self.model_builder(**hyper_parameters) model.fit(train_dataset) + ########################################## + print("SAVING MODEL") + ########################################## try: model.save() # Some models autosave except NotImplementedError: pass - #evaluator = Evaluator(model, valid_dataset, transformers) - #multitask_scores = evaluator.compute_model_performance([metric]) multitask_scores = model.evaluate(valid_dataset, [metric]) score = multitask_scores[metric.name] if log_file: with open(log_file, 'a') as f: # Record performances - f.write(str(score)) + f.write("Score: %s" % str(score)) f.write('\n') # Store all results all_results[hp_str] = score @@ -307,6 +314,9 @@ def optimizing_function(**placeholders): model_dir = model_locations[hp_str] hyper_parameters["model_dir"] = model_dir best_model = self.model_builder(**hyper_parameters) + ########################################## + print("RESTORING BEST MODEL") + ########################################## # Some models need to be explicitly reloaded try: best_model.restore() diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 56812ef0eb..8802c90b77 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -34,68 +34,68 @@ def rf_model_builder(**model_params): self.valid_dataset = dc.data.NumpyDataset( X=np.random.rand(20, 5), y=np.random.rand(20, 1)) - def test_rf_example(self): - """Test a simple example of optimizing a RF model with a gaussian process.""" - - optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) - params_dict = {"n_estimators": 10} - transformers = [] - metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - self.train_dataset, - self.valid_dataset, - transformers, - metric, - max_iter=2) - - valid_score = best_model.evaluate(self.valid_dataset, [metric], - transformers) - assert valid_score["pearson_r2_score"] == max(all_results.values()) - assert valid_score["pearson_r2_score"] > 0 - - def test_rf_example_min(self): - """Test a simple example of optimizing a RF model with a gaussian process looking for minimum score.""" - - optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) - params_dict = {"n_estimators": 10} - transformers = [] - metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - self.train_dataset, - 
self.valid_dataset, - transformers, - metric, - use_max=False, - max_iter=2) - - valid_score = best_model.evaluate(self.valid_dataset, [metric], - transformers) - assert valid_score["pearson_r2_score"] == min(all_results.values()) - assert valid_score["pearson_r2_score"] > 0 - - def test_rf_with_logdir(self): - """Test that using a logdir can work correctly.""" - optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) - params_dict = {"n_estimators": 10} - transformers = [] - metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) - with tempfile.TemporaryDirectory() as tmpdirname: - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - self.train_dataset, - self.valid_dataset, - transformers, - metric, - logdir=tmpdirname, - max_iter=2) - valid_score = best_model.evaluate(self.valid_dataset, [metric], - transformers) - assert valid_score["pearson_r2_score"] == max(all_results.values()) - assert valid_score["pearson_r2_score"] > 0 +# def test_rf_example(self): +# """Test a simple example of optimizing a RF model with a gaussian process.""" +# +# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) +# params_dict = {"n_estimators": 10} +# transformers = [] +# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +# +# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( +# params_dict, +# self.train_dataset, +# self.valid_dataset, +# transformers, +# metric, +# max_iter=2) +# +# valid_score = best_model.evaluate(self.valid_dataset, [metric], +# transformers) +# assert valid_score["pearson_r2_score"] == max(all_results.values()) +# assert valid_score["pearson_r2_score"] > 0 +# +# def test_rf_example_min(self): +# """Test a simple example of optimizing a RF model with a gaussian process looking for minimum score.""" +# +# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) +# params_dict = {"n_estimators": 10} +# transformers = [] +# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +# +# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( +# params_dict, +# self.train_dataset, +# self.valid_dataset, +# transformers, +# metric, +# use_max=False, +# max_iter=2) +# +# valid_score = best_model.evaluate(self.valid_dataset, [metric], +# transformers) +# assert valid_score["pearson_r2_score"] == min(all_results.values()) +# assert valid_score["pearson_r2_score"] > 0 +# +# def test_rf_with_logdir(self): +# """Test that using a logdir can work correctly.""" +# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) +# params_dict = {"n_estimators": 10} +# transformers = [] +# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) +# with tempfile.TemporaryDirectory() as tmpdirname: +# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( +# params_dict, +# self.train_dataset, +# self.valid_dataset, +# transformers, +# metric, +# logdir=tmpdirname, +# max_iter=2) +# valid_score = best_model.evaluate(self.valid_dataset, [metric], +# transformers) +# assert valid_score["pearson_r2_score"] == max(all_results.values()) +# assert valid_score["pearson_r2_score"] > 0 @flaky def test_multitask_example(self): @@ -125,56 +125,57 @@ def test_multitask_example(self): valid_dataset, transformers, metric, - max_iter=2, + max_iter=1, use_max=False) valid_score = best_model.evaluate(valid_dataset, [metric]) assert valid_score["mean-mean_squared_error"] == min(all_results.values()) assert valid_score["mean-mean_squared_error"] > 0 - @flaky - 
def test_multitask_example_different_search_range(self): - """Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range.""" - # Generate dummy dataset - np.random.seed(123) - train_dataset = dc.data.NumpyDataset( - np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), - np.arange(10)) - valid_dataset = dc.data.NumpyDataset( - np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) - optimizer = dc.hyper.GaussianProcessHyperparamOpt( - lambda **p: dc.models.MultitaskRegressor( - n_tasks=2, - n_features=3, - dropouts=[0.], - weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], - #learning_rate=0.003, **p)) - **p)) - - params_dict = {"learning_rate": 0.003, "batch_size": 10} - # These are per-example multiplier - search_range = {"learning_rate": 10, "batch_size": 4} - transformers = [] - metric = dc.metrics.Metric( - dc.metrics.mean_squared_error, task_averager=np.mean) - - with tempfile.TemporaryDirectory() as tmpdirname: - best_model, best_hyperparams, all_results = optimizer.hyperparam_search( - params_dict, - train_dataset, - valid_dataset, - transformers, - metric, - max_iter=2, - logdir=tmpdirname, - search_range=search_range, - use_max=False) - valid_score = best_model.evaluate(valid_dataset, [metric]) - # Test that 2 parameters were optimized - for hp_str in all_results.keys(): - # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example - assert "batch_size" in hp_str - assert "learning_rate" in hp_str - assert valid_score["mean-mean_squared_error"] == min(all_results.values()) - assert valid_score["mean-mean_squared_error"] > 0 +# @flaky +# def test_multitask_example_different_search_range(self): +# """Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range.""" +# # Generate dummy dataset +# np.random.seed(123) +# train_dataset = dc.data.NumpyDataset( +# np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), +# np.arange(10)) +# valid_dataset = dc.data.NumpyDataset( +# np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) +# +# optimizer = dc.hyper.GaussianProcessHyperparamOpt( +# lambda **p: dc.models.MultitaskRegressor( +# n_tasks=2, +# n_features=3, +# dropouts=[0.], +# weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], +# #learning_rate=0.003, **p)) +# **p)) +# +# params_dict = {"learning_rate": 0.003, "batch_size": 10} +# # These are per-example multiplier +# search_range = {"learning_rate": 10, "batch_size": 4} +# transformers = [] +# metric = dc.metrics.Metric( +# dc.metrics.mean_squared_error, task_averager=np.mean) +# +# with tempfile.TemporaryDirectory() as tmpdirname: +# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( +# params_dict, +# train_dataset, +# valid_dataset, +# transformers, +# metric, +# max_iter=2, +# logdir=tmpdirname, +# search_range=search_range, +# use_max=False) +# valid_score = best_model.evaluate(valid_dataset, [metric]) +# # Test that 2 parameters were optimized +# for hp_str in all_results.keys(): +# # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example +# assert "batch_size" in hp_str +# assert "learning_rate" in hp_str +# assert valid_score["mean-mean_squared_error"] == min(all_results.values()) +# assert valid_score["mean-mean_squared_error"] > 0 diff --git a/examples/hyperparam_opt/gaussian_hyperparam_opt.py b/examples/hyperparam_opt/gaussian_hyperparam_opt.py index 1fc654c04f..0c47b62126 100644 
--- a/examples/hyperparam_opt/gaussian_hyperparam_opt.py +++ b/examples/hyperparam_opt/gaussian_hyperparam_opt.py @@ -18,7 +18,7 @@ params_dict = {"dropout": 0.5} best_model, best_params, all_results = optimizer.hyperparam_search( - params_dict, train, valid, transformers, metric, max_iter=2, search_range=2) + params_dict, train, valid, transformers, metric, max_iter=1, search_range=2) valid_score = best_model.evaluate(valid, [metric], transformers) print("valid_score") From 608ef37a6eddd05c686e7afa7b922a60d786afca Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Thu, 2 Jul 2020 16:12:04 -0700 Subject: [PATCH 21/23] Finished --- deepchem/hyper/gaussian_process.py | 26 +-- .../tests/test_gaussian_hyperparam_opt.py | 215 +++++++++-------- deepchem/molnet/preset_hyper_parameters.py | 6 +- deepchem/molnet/run_benchmark.py | 219 +++++------------- deepchem/molnet/run_benchmark_models.py | 2 - docs/featurizers.rst | 19 ++ docs/moleculenet.rst | 84 ++++--- 7 files changed, 244 insertions(+), 327 deletions(-) diff --git a/deepchem/hyper/gaussian_process.py b/deepchem/hyper/gaussian_process.py index 4bc79e01f3..9d3724cfc3 100644 --- a/deepchem/hyper/gaussian_process.py +++ b/deepchem/hyper/gaussian_process.py @@ -205,6 +205,8 @@ def hyperparam_search(self, # Stores all results all_results = {} + # Store all model references so we don't have to reload + all_models = {} # Stores all model locations model_locations = {} @@ -255,15 +257,8 @@ def optimizing_function(**placeholders): model_dir = tempfile.mkdtemp() # Add it on to the information needed for the constructor hyper_parameters["model_dir"] = model_dir - ########################################## - print("hyper_parameters") - print(hyper_parameters) - ########################################## model = self.model_builder(**hyper_parameters) model.fit(train_dataset) - ########################################## - print("SAVING MODEL") - ########################################## try: model.save() # Some models autosave @@ -280,6 +275,8 @@ def optimizing_function(**placeholders): f.write('\n') # Store all results all_results[hp_str] = score + # Store reference to model + all_models[hp_str] = model model_locations[hp_str] = model_dir # GPGO maximize performance by default, set performance to its negative value for minimization if use_max: @@ -310,19 +307,8 @@ def optimizing_function(**placeholders): hyper_parameters[hp] = float(hp_opt[hp]) hp_str = _convert_hyperparam_dict_to_filename(hyper_parameters) - # Let's reinitialize the model with the best parameters - model_dir = model_locations[hp_str] - hyper_parameters["model_dir"] = model_dir - best_model = self.model_builder(**hyper_parameters) - ########################################## - print("RESTORING BEST MODEL") - ########################################## - # Some models need to be explicitly reloaded - try: - best_model.restore() - # Some models auto reload - except NotImplementedError: - pass + # Let's fetch the model with the best parameters + best_model = all_models[hp_str] # Compare best model to default hyperparameters if log_file: diff --git a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py index 8802c90b77..f1390a01b2 100644 --- a/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py +++ b/deepchem/hyper/tests/test_gaussian_hyperparam_opt.py @@ -34,68 +34,68 @@ def rf_model_builder(**model_params): self.valid_dataset = dc.data.NumpyDataset( X=np.random.rand(20, 5), y=np.random.rand(20, 1)) -# def test_rf_example(self): 
-# """Test a simple example of optimizing a RF model with a gaussian process.""" -# -# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) -# params_dict = {"n_estimators": 10} -# transformers = [] -# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) -# -# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( -# params_dict, -# self.train_dataset, -# self.valid_dataset, -# transformers, -# metric, -# max_iter=2) -# -# valid_score = best_model.evaluate(self.valid_dataset, [metric], -# transformers) -# assert valid_score["pearson_r2_score"] == max(all_results.values()) -# assert valid_score["pearson_r2_score"] > 0 -# -# def test_rf_example_min(self): -# """Test a simple example of optimizing a RF model with a gaussian process looking for minimum score.""" -# -# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) -# params_dict = {"n_estimators": 10} -# transformers = [] -# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) -# -# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( -# params_dict, -# self.train_dataset, -# self.valid_dataset, -# transformers, -# metric, -# use_max=False, -# max_iter=2) -# -# valid_score = best_model.evaluate(self.valid_dataset, [metric], -# transformers) -# assert valid_score["pearson_r2_score"] == min(all_results.values()) -# assert valid_score["pearson_r2_score"] > 0 -# -# def test_rf_with_logdir(self): -# """Test that using a logdir can work correctly.""" -# optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) -# params_dict = {"n_estimators": 10} -# transformers = [] -# metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) -# with tempfile.TemporaryDirectory() as tmpdirname: -# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( -# params_dict, -# self.train_dataset, -# self.valid_dataset, -# transformers, -# metric, -# logdir=tmpdirname, -# max_iter=2) -# valid_score = best_model.evaluate(self.valid_dataset, [metric], -# transformers) -# assert valid_score["pearson_r2_score"] == max(all_results.values()) -# assert valid_score["pearson_r2_score"] > 0 + def test_rf_example(self): + """Test a simple example of optimizing a RF model with a gaussian process.""" + + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": 10} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + max_iter=2) + + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] == max(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 + + def test_rf_example_min(self): + """Test a simple example of optimizing a RF model with a gaussian process looking for minimum score.""" + + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": 10} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + use_max=False, + max_iter=2) + + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] == min(all_results.values()) + assert valid_score["pearson_r2_score"] > 
0 + + def test_rf_with_logdir(self): + """Test that using a logdir can work correctly.""" + optimizer = dc.hyper.GaussianProcessHyperparamOpt(self.rf_model_builder) + params_dict = {"n_estimators": 10} + transformers = [] + metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + self.train_dataset, + self.valid_dataset, + transformers, + metric, + logdir=tmpdirname, + max_iter=2) + valid_score = best_model.evaluate(self.valid_dataset, [metric], + transformers) + assert valid_score["pearson_r2_score"] == max(all_results.values()) + assert valid_score["pearson_r2_score"] > 0 @flaky def test_multitask_example(self): @@ -132,50 +132,49 @@ def test_multitask_example(self): assert valid_score["mean-mean_squared_error"] == min(all_results.values()) assert valid_score["mean-mean_squared_error"] > 0 + @flaky + def test_multitask_example_different_search_range(self): + """Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range.""" + # Generate dummy dataset + np.random.seed(123) + train_dataset = dc.data.NumpyDataset( + np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), + np.arange(10)) + valid_dataset = dc.data.NumpyDataset( + np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) -# @flaky -# def test_multitask_example_different_search_range(self): -# """Test a simple example of optimizing a multitask model with a gaussian process search with per-parameter search range.""" -# # Generate dummy dataset -# np.random.seed(123) -# train_dataset = dc.data.NumpyDataset( -# np.random.rand(10, 3), np.zeros((10, 2)), np.ones((10, 2)), -# np.arange(10)) -# valid_dataset = dc.data.NumpyDataset( -# np.random.rand(5, 3), np.zeros((5, 2)), np.ones((5, 2)), np.arange(5)) -# -# optimizer = dc.hyper.GaussianProcessHyperparamOpt( -# lambda **p: dc.models.MultitaskRegressor( -# n_tasks=2, -# n_features=3, -# dropouts=[0.], -# weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], -# #learning_rate=0.003, **p)) -# **p)) -# -# params_dict = {"learning_rate": 0.003, "batch_size": 10} -# # These are per-example multiplier -# search_range = {"learning_rate": 10, "batch_size": 4} -# transformers = [] -# metric = dc.metrics.Metric( -# dc.metrics.mean_squared_error, task_averager=np.mean) -# -# with tempfile.TemporaryDirectory() as tmpdirname: -# best_model, best_hyperparams, all_results = optimizer.hyperparam_search( -# params_dict, -# train_dataset, -# valid_dataset, -# transformers, -# metric, -# max_iter=2, -# logdir=tmpdirname, -# search_range=search_range, -# use_max=False) -# valid_score = best_model.evaluate(valid_dataset, [metric]) -# # Test that 2 parameters were optimized -# for hp_str in all_results.keys(): -# # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example -# assert "batch_size" in hp_str -# assert "learning_rate" in hp_str -# assert valid_score["mean-mean_squared_error"] == min(all_results.values()) -# assert valid_score["mean-mean_squared_error"] > 0 + optimizer = dc.hyper.GaussianProcessHyperparamOpt( + lambda **p: dc.models.MultitaskRegressor( + n_tasks=2, + n_features=3, + dropouts=[0.], + weight_init_stddevs=[np.sqrt(6) / np.sqrt(1000)], + #learning_rate=0.003, **p)) + **p)) + + params_dict = {"learning_rate": 0.003, "batch_size": 10} + # These are per-example multiplier + search_range = {"learning_rate": 10, "batch_size": 4} + transformers = 
[] + metric = dc.metrics.Metric( + dc.metrics.mean_squared_error, task_averager=np.mean) + + with tempfile.TemporaryDirectory() as tmpdirname: + best_model, best_hyperparams, all_results = optimizer.hyperparam_search( + params_dict, + train_dataset, + valid_dataset, + transformers, + metric, + max_iter=2, + logdir=tmpdirname, + search_range=search_range, + use_max=False) + valid_score = best_model.evaluate(valid_dataset, [metric]) + # Test that 2 parameters were optimized + for hp_str in all_results.keys(): + # Recall that the key is a string of the form _batch_size_39_learning_rate_0.01 for example + assert "batch_size" in hp_str + assert "learning_rate" in hp_str + assert valid_score["mean-mean_squared_error"] == min(all_results.values()) + assert valid_score["mean-mean_squared_error"] > 0 diff --git a/deepchem/molnet/preset_hyper_parameters.py b/deepchem/molnet/preset_hyper_parameters.py index fe2fcbee85..571d6943b2 100644 --- a/deepchem/molnet/preset_hyper_parameters.py +++ b/deepchem/molnet/preset_hyper_parameters.py @@ -1,9 +1,5 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- """ -Created on Tue Mar 7 00:07:10 2017 - -@author: zqwu +This file holds the current best set of hyperparameters for the Molnet benchmark. """ import deepchem diff --git a/deepchem/molnet/run_benchmark.py b/deepchem/molnet/run_benchmark.py index c9cb3d7df8..cedc3f7fae 100644 --- a/deepchem/molnet/run_benchmark.py +++ b/deepchem/molnet/run_benchmark.py @@ -1,12 +1,10 @@ -# -*- coding: utf-8 -*- """ -Created on Mon Mar 06 14:25:40 2017 - -@author: Zhenqin Wu +This file provides utilities to run the MoleculeNet benchmark suite. """ import os import time import csv +import logging import numpy as np import tensorflow as tf import deepchem @@ -15,6 +13,43 @@ from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit from deepchem.molnet.preset_hyper_parameters import hps +logger = logging.getLogger(__name__) + +# Loading functions available +loading_functions = { + 'bace_c': deepchem.molnet.load_bace_classification, + 'bace_r': deepchem.molnet.load_bace_regression, + 'bbbp': deepchem.molnet.load_bbbp, + 'chembl': deepchem.molnet.load_chembl, + 'clearance': deepchem.molnet.load_clearance, + 'clintox': deepchem.molnet.load_clintox, + 'delaney': deepchem.molnet.load_delaney, + 'factors': deepchem.molnet.load_factors, + 'hiv': deepchem.molnet.load_hiv, + 'hopv': deepchem.molnet.load_hopv, + 'hppb': deepchem.molnet.load_hppb, + 'kaggle': deepchem.molnet.load_kaggle, + 'kinase': deepchem.molnet.load_kinase, + 'lipo': deepchem.molnet.load_lipo, + 'muv': deepchem.molnet.load_muv, + 'nci': deepchem.molnet.load_nci, + 'pcba': deepchem.molnet.load_pcba, + 'pcba_146': deepchem.molnet.load_pcba_146, + 'pcba_2475': deepchem.molnet.load_pcba_2475, + 'pdbbind': deepchem.molnet.load_pdbbind_grid, + 'ppb': deepchem.molnet.load_ppb, + 'qm7': deepchem.molnet.load_qm7_from_mat, + 'qm7b': deepchem.molnet.load_qm7b_from_mat, + 'qm8': deepchem.molnet.load_qm8, + 'qm9': deepchem.molnet.load_qm9, + 'sampl': deepchem.molnet.load_sampl, + 'sider': deepchem.molnet.load_sider, + 'thermosol': deepchem.molnet.load_thermosol, + 'tox21': deepchem.molnet.load_tox21, + 'toxcast': deepchem.molnet.load_toxcast, + 'uv': deepchem.molnet.load_uv, +} + def run_benchmark(datasets, model, @@ -31,16 +66,21 @@ def run_benchmark(datasets, test=False, reload=True, seed=123): - """ - Run benchmark test on designated datasets with deepchem(or user-defined) model + """Run MoleculeNet benchmark suite. 
+ + This is a utility function to help run the MoleculeNet benchmark + suite on a specified model and a specified dataset. + + Run benchmark test on designated datasets with deepchem(or + user-defined) model. Parameters ---------- datasets: list of string - choice of which datasets to use, should be: bace_c, bace_r, bbbp, chembl, - clearance, clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba, - pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast, uv, factors, - kinase + choice of which datasets to use, should be one of: bace_c, + bace_r, bbbp, chembl, clearance, clintox, delaney, hiv, hopv, + kaggle, lipo, muv, nci, pcba, pdbbind, ppb, qm7, qm7b, qm8, qm9, + sampl, sider, tox21, toxcast, uv, factors, kinase model: string or user-defined model stucture choice of which model to use, deepchem provides implementation of logistic regression, random forest, multitask network, @@ -49,10 +89,10 @@ def run_benchmark(datasets, split: string, optional (default=None) choice of splitter function, None = using the default splitter metric: string, optional (default=None) - choice of evaluation metrics, None = using the default metrics(AUC & R2) - direction: bool, optional(default=True) - Optimization direction when doing hyperparameter search - Maximization(True) or minimization(False) + Choice of evaluation metrics, None = using the default metrics(AUC & R2) + use_max: bool, (default True) + Specifies whether to maximize or minimize `metric`. + maximization(True) or minimization(False) featurizer: string or dc.feat.Featurizer, optional (default=None) choice of featurization, None = using the default corresponding to model (string only applicable to deepchem models) @@ -110,46 +150,12 @@ def run_benchmark(datasets, if not split in [None] + CheckSplit[dataset]: continue - loading_functions = { - 'bace_c': deepchem.molnet.load_bace_classification, - 'bace_r': deepchem.molnet.load_bace_regression, - 'bbbp': deepchem.molnet.load_bbbp, - 'chembl': deepchem.molnet.load_chembl, - 'clearance': deepchem.molnet.load_clearance, - 'clintox': deepchem.molnet.load_clintox, - 'delaney': deepchem.molnet.load_delaney, - 'factors': deepchem.molnet.load_factors, - 'hiv': deepchem.molnet.load_hiv, - 'hopv': deepchem.molnet.load_hopv, - 'hppb': deepchem.molnet.load_hppb, - 'kaggle': deepchem.molnet.load_kaggle, - 'kinase': deepchem.molnet.load_kinase, - 'lipo': deepchem.molnet.load_lipo, - 'muv': deepchem.molnet.load_muv, - 'nci': deepchem.molnet.load_nci, - 'pcba': deepchem.molnet.load_pcba, - 'pcba_146': deepchem.molnet.load_pcba_146, - 'pcba_2475': deepchem.molnet.load_pcba_2475, - 'pdbbind': deepchem.molnet.load_pdbbind_grid, - 'ppb': deepchem.molnet.load_ppb, - 'qm7': deepchem.molnet.load_qm7_from_mat, - 'qm7b': deepchem.molnet.load_qm7b_from_mat, - 'qm8': deepchem.molnet.load_qm8, - 'qm9': deepchem.molnet.load_qm9, - 'sampl': deepchem.molnet.load_sampl, - 'sider': deepchem.molnet.load_sider, - 'thermosol': deepchem.molnet.load_thermosol, - 'tox21': deepchem.molnet.load_tox21, - 'toxcast': deepchem.molnet.load_toxcast, - 'uv': deepchem.molnet.load_uv, - } - - print('-------------------------------------') - print('Benchmark on dataset: %s' % dataset) - print('-------------------------------------') + logger.info('-------------------------------------') + logger.info('Benchmark on dataset: %s' % dataset) + logger.info('-------------------------------------') # loading datasets if split is not None: - print('Splitting function: %s' % split) + logger.info('Splitting function: %s' % split) tasks, 
all_dataset, transformers = loading_functions[dataset]( featurizer=featurizer, split=split, reload=reload) else: @@ -173,8 +179,7 @@ def run_benchmark(datasets, valid_dataset, transformers, metric, - direction=direction, - n_features=n_features, + use_max=use_max, n_tasks=len(tasks), max_iter=max_iter, search_range=search_range) @@ -187,7 +192,6 @@ def run_benchmark(datasets, test_dataset, tasks, transformers, - n_features, metric, model, test=test, @@ -235,108 +239,3 @@ def run_benchmark(datasets, if hyper_param_search: with open(os.path.join(out_path, dataset + model + '.pkl'), 'w') as f: pickle.dump(hyper_parameters, f) - - -# -# Note by @XericZephyr. Reason why I spun off this function: -# 1. Some model needs dataset information. -# 2. It offers us possibility to **cache** the dataset -# if the featurizer runs very slow, e.g., GraphConv. -# 2+. The cache can even happen at Travis CI to accelerate -# CI testing. -# -def load_dataset(dataset, featurizer, split='random'): - """ - Load specific dataset for benchmark. - - Parameters - ---------- - dataset: string - choice of which datasets to use, should be: tox21, muv, sider, - toxcast, pcba, delaney, factors, hiv, hopv, kaggle, kinase, nci, - clintox, hiv, pcba_128, pcba_146, pdbbind, chembl, qm7, qm7b, qm9, - sampl, uv - featurizer: string or dc.feat.Featurizer. - choice of featurization. - split: string, optional (default=None) - choice of splitter function, None = using the default splitter - """ - dataset_loading_functions = { - 'bace_c': deepchem.molnet.load_bace_classification, - 'bace_r': deepchem.molnet.load_bace_regression, - 'bbbp': deepchem.molnet.load_bbbp, - 'chembl': deepchem.molnet.load_chembl, - 'clearance': deepchem.molnet.load_clearance, - 'clintox': deepchem.molnet.load_clintox, - 'delaney': deepchem.molnet.load_delaney, - 'factors': deepchem.molnet.load_factors, - 'hiv': deepchem.molnet.load_hiv, - 'hopv': deepchem.molnet.load_hopv, - 'hppb': deepchem.molnet.load_hppb, - 'kaggle': deepchem.molnet.load_kaggle, - 'kinase': deepchem.molnet.load_kinase, - 'lipo': deepchem.molnet.load_lipo, - 'muv': deepchem.molnet.load_muv, - 'nci': deepchem.molnet.load_nci, - 'pcba': deepchem.molnet.load_pcba, - 'pcba_128': deepchem.molnet.load_pcba_128, - 'pcba_146': deepchem.molnet.load_pcba_146, - 'pcba_2475': deepchem.molnet.load_pcba_2475, - 'pdbbind': deepchem.molnet.load_pdbbind_grid, - 'ppb': deepchem.molnet.load_ppb, - 'qm7': deepchem.molnet.load_qm7_from_mat, - 'qm7b': deepchem.molnet.load_qm7b_from_mat, - 'qm8': deepchem.molnet.load_qm8, - 'qm9': deepchem.molnet.load_qm9, - 'sampl': deepchem.molnet.load_sampl, - 'sider': deepchem.molnet.load_sider, - 'thermosol': deepchem.molnet.load_thermosol, - 'tox21': deepchem.molnet.load_tox21, - 'toxcast': deepchem.molnet.load_toxcast, - 'uv': deepchem.molnet.load_uv - } - print('-------------------------------------') - print('Loading dataset: %s' % dataset) - print('-------------------------------------') - # loading datasets - if split is not None: - print('Splitting function: %s' % split) - tasks, all_dataset, transformers = dataset_loading_functions[dataset]( - featurizer=featurizer, split=split) - return tasks, all_dataset, transformers - - -def benchmark_model(model, all_dataset, transformers, metric, test=False): - """ - Benchmark custom model. - - model: user-defined model stucture - For user define model, it should include function: fit, evaluate. - - all_dataset: (train, test, val) data tuple. - Returned by `load_dataset` function. 
- - transformers - - metric: string - choice of evaluation metrics. - - - """ - time_start_fitting = time.time() - train_score = .0 - valid_score = .0 - test_score = .0 - - train_dataset, valid_dataset, test_dataset = all_dataset - - model.fit(train_dataset) - train_score = model.evaluate(train_dataset, metric, transformers) - valid_score = model.evaluate(valid_dataset, metric, transformers) - if test: - test_score = model.evaluate(test_dataset, metric, transformers) - - time_finish_fitting = time.time() - time_for_running = time_finish_fitting - time_start_fitting - - return train_score, valid_score, test_score, time_for_running diff --git a/deepchem/molnet/run_benchmark_models.py b/deepchem/molnet/run_benchmark_models.py index 37c80a0669..c208611b48 100644 --- a/deepchem/molnet/run_benchmark_models.py +++ b/deepchem/molnet/run_benchmark_models.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python2 -# -*- coding: utf-8 -*- """ Created on Mon Mar 6 23:41:26 2017 diff --git a/docs/featurizers.rst b/docs/featurizers.rst index 384f7cfa4f..8fbe4fc966 100644 --- a/docs/featurizers.rst +++ b/docs/featurizers.rst @@ -22,6 +22,25 @@ nevertheless, deep learning systems can't simply chew up raw files. For this reason, :code:`deepchem` provides an extensive collection of featurization methods which we will review on this page. +Featurizer-Model Matchups +------------------------- + +If you're using DeepChem in practical applications, you probably want +to use a given model on some dataset. Your first question when you try +to do this will probably be which featurizer should I use? + ++------------+--------------------------+-----------+ +| Model | Acceptable Featurizers | Header 3 | ++============+==========================+===========+ +| body row 1 | column 2 | column 3 | ++------------+------------+-----------+ +| body row 2 | Cells may span columns.| ++------------+------------+-----------+ +| body row 3 | Cells may | - Cells | ++------------+ span rows. | - contain | +| body row 4 | | - blocks. | ++------------+------------+-----------+ + Featurizer ---------- diff --git a/docs/moleculenet.rst b/docs/moleculenet.rst index cf241caf94..1871c78d59 100644 --- a/docs/moleculenet.rst +++ b/docs/moleculenet.rst @@ -2,121 +2,141 @@ MoleculeNet =========== The DeepChem library is packaged alongside the MoleculeNet suite of datasets. One of the most important parts of machine learning applications is finding a suitable dataset. The MoleculeNet suite has curated a whole range of datasets and loaded them into DeepChem :code:`dc.data.Dataset` objects for convenience. +Running Benchmark +----------------- + +At present, there is only support for running benchmark models + +.. autofunction:: deepchem.molnet.run_benchmark + +Best Known Hyperparameters +-------------------------- + +MoleculeNet maintains a list of the currently best known +hyperparameters for various models on MoleculeNet benchmarks. + +MoleculeNet Datasets +-------------------- + +MoleculeNet is actively maintained and contains a growing set of +different datasets. Here are the set of currently available +MoleculeNet datasets. + BACE Dataset ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_bace_classification .. autofunction:: deepchem.molnet.load_bace_regression BBBC Datasets -------------- +^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_bbbc001 .. autofunction:: deepchem.molnet.load_bbbc002 BBBP Datasets -------------- +^^^^^^^^^^^^^ BBBP stands for Blood-Brain-Barrier Penetration .. 
autofunction:: deepchem.molnet.load_bbbp Cell Counting Datasets ----------------------- +^^^^^^^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_cell_counting Chembl Datasets ---------------- +^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_chembl Chembl25 Datasets ---------------- +^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_chembl25 Clearance Datasets ------------------- +^^^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_clearance Clintox Datasets ----------------- +^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_clintox Delaney Datasets ----------------- +^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_delaney Factors Datasets ----------------- +^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_factors HIV Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_hiv HOPV Datasets -------------- +^^^^^^^^^^^^^ HOPV stands for the Harvard Organic Photovoltaic Dataset. .. autofunction:: deepchem.molnet.load_hopv HPPB Datasets -------------- +^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_hppb KAGGLE Datasets ---------------- +^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_kaggle Kinase Datasets ---------------- +^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_kinase Lipo Datasets -------------- +^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_lipo MUV Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_muv NCI Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_nci PCBA Datasets -------------- +^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_pcba PDBBIND Datasets ----------------- +^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_pdbbind PPB Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_ppb QM7 Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_qm7 @@ -125,54 +145,54 @@ QM7 Datasets .. autofunction:: deepchem.molnet.load_qm7b_from_mat QM8 Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_qm8 QM9 Datasets ------------- +^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_qm9 SAMPL Datasets --------------- +^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_sampl SIDER Datasets --------------- +^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_sider SWEETLEAD Datasets ------------------- +^^^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_sweetlead Thermosol Datasets ------------------- +^^^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_thermosol Tox21 Datasets --------------- +^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_tox21 Toxcast Datasets ----------------- +^^^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_toxcast USPTO Datasets --------------- +^^^^^^^^^^^^^^ .. autofunction:: deepchem.molnet.load_uspto UV Datasets ------------ +^^^^^^^^^^^ .. 
autofunction:: deepchem.molnet.load_uv From cc99dfb5becf523f6fda3fa7d583ce10214afbb8 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Thu, 2 Jul 2020 16:50:53 -0700 Subject: [PATCH 22/23] cleaning up changes --- deepchem/models/models.py | 2 +- deepchem/models/sklearn_models/__init__.py | 2 +- deepchem/molnet/run_benchmark.py | 219 +++++++++++++++------ deepchem/molnet/run_benchmark_models.py | 2 + docs/featurizers.rst | 19 -- docs/moleculenet.rst | 84 +++----- 6 files changed, 196 insertions(+), 132 deletions(-) diff --git a/deepchem/models/models.py b/deepchem/models/models.py index b6f7df235f..993d915054 100644 --- a/deepchem/models/models.py +++ b/deepchem/models/models.py @@ -77,7 +77,7 @@ def predict_on_batch(self, X, **kwargs): raise NotImplementedError( "Each model is responsible for its own predict_on_batch method.") - def restore(self): + def reload(self): """ Reload trained model from disk. """ diff --git a/deepchem/models/sklearn_models/__init__.py b/deepchem/models/sklearn_models/__init__.py index b5cf0a007c..dfcbe28209 100644 --- a/deepchem/models/sklearn_models/__init__.py +++ b/deepchem/models/sklearn_models/__init__.py @@ -92,7 +92,7 @@ def save(self): """Saves sklearn model to disk using joblib.""" save_to_disk(self.model_instance, self.get_model_filename(self.model_dir)) - def restore(self): + def reload(self): """Loads sklearn model from joblib file on disk.""" self.model_instance = load_from_disk( Model.get_model_filename(self.model_dir)) diff --git a/deepchem/molnet/run_benchmark.py b/deepchem/molnet/run_benchmark.py index cedc3f7fae..c9cb3d7df8 100644 --- a/deepchem/molnet/run_benchmark.py +++ b/deepchem/molnet/run_benchmark.py @@ -1,10 +1,12 @@ +# -*- coding: utf-8 -*- """ -This file provides utilities to run the MoleculeNet benchmark suite. 
+Created on Mon Mar 06 14:25:40 2017 + +@author: Zhenqin Wu """ import os import time import csv -import logging import numpy as np import tensorflow as tf import deepchem @@ -13,43 +15,6 @@ from deepchem.molnet.check_availability import CheckFeaturizer, CheckSplit from deepchem.molnet.preset_hyper_parameters import hps -logger = logging.getLogger(__name__) - -# Loading functions available -loading_functions = { - 'bace_c': deepchem.molnet.load_bace_classification, - 'bace_r': deepchem.molnet.load_bace_regression, - 'bbbp': deepchem.molnet.load_bbbp, - 'chembl': deepchem.molnet.load_chembl, - 'clearance': deepchem.molnet.load_clearance, - 'clintox': deepchem.molnet.load_clintox, - 'delaney': deepchem.molnet.load_delaney, - 'factors': deepchem.molnet.load_factors, - 'hiv': deepchem.molnet.load_hiv, - 'hopv': deepchem.molnet.load_hopv, - 'hppb': deepchem.molnet.load_hppb, - 'kaggle': deepchem.molnet.load_kaggle, - 'kinase': deepchem.molnet.load_kinase, - 'lipo': deepchem.molnet.load_lipo, - 'muv': deepchem.molnet.load_muv, - 'nci': deepchem.molnet.load_nci, - 'pcba': deepchem.molnet.load_pcba, - 'pcba_146': deepchem.molnet.load_pcba_146, - 'pcba_2475': deepchem.molnet.load_pcba_2475, - 'pdbbind': deepchem.molnet.load_pdbbind_grid, - 'ppb': deepchem.molnet.load_ppb, - 'qm7': deepchem.molnet.load_qm7_from_mat, - 'qm7b': deepchem.molnet.load_qm7b_from_mat, - 'qm8': deepchem.molnet.load_qm8, - 'qm9': deepchem.molnet.load_qm9, - 'sampl': deepchem.molnet.load_sampl, - 'sider': deepchem.molnet.load_sider, - 'thermosol': deepchem.molnet.load_thermosol, - 'tox21': deepchem.molnet.load_tox21, - 'toxcast': deepchem.molnet.load_toxcast, - 'uv': deepchem.molnet.load_uv, -} - def run_benchmark(datasets, model, @@ -66,21 +31,16 @@ def run_benchmark(datasets, test=False, reload=True, seed=123): - """Run MoleculeNet benchmark suite. - - This is a utility function to help run the MoleculeNet benchmark - suite on a specified model and a specified dataset. - - Run benchmark test on designated datasets with deepchem(or - user-defined) model. + """ + Run benchmark test on designated datasets with deepchem(or user-defined) model Parameters ---------- datasets: list of string - choice of which datasets to use, should be one of: bace_c, - bace_r, bbbp, chembl, clearance, clintox, delaney, hiv, hopv, - kaggle, lipo, muv, nci, pcba, pdbbind, ppb, qm7, qm7b, qm8, qm9, - sampl, sider, tox21, toxcast, uv, factors, kinase + choice of which datasets to use, should be: bace_c, bace_r, bbbp, chembl, + clearance, clintox, delaney, hiv, hopv, kaggle, lipo, muv, nci, pcba, + pdbbind, ppb, qm7, qm7b, qm8, qm9, sampl, sider, tox21, toxcast, uv, factors, + kinase model: string or user-defined model stucture choice of which model to use, deepchem provides implementation of logistic regression, random forest, multitask network, @@ -89,10 +49,10 @@ def run_benchmark(datasets, split: string, optional (default=None) choice of splitter function, None = using the default splitter metric: string, optional (default=None) - Choice of evaluation metrics, None = using the default metrics(AUC & R2) - use_max: bool, (default True) - Specifies whether to maximize or minimize `metric`. 
- maximization(True) or minimization(False) + choice of evaluation metrics, None = using the default metrics(AUC & R2) + direction: bool, optional(default=True) + Optimization direction when doing hyperparameter search + Maximization(True) or minimization(False) featurizer: string or dc.feat.Featurizer, optional (default=None) choice of featurization, None = using the default corresponding to model (string only applicable to deepchem models) @@ -150,12 +110,46 @@ def run_benchmark(datasets, if not split in [None] + CheckSplit[dataset]: continue - logger.info('-------------------------------------') - logger.info('Benchmark on dataset: %s' % dataset) - logger.info('-------------------------------------') + loading_functions = { + 'bace_c': deepchem.molnet.load_bace_classification, + 'bace_r': deepchem.molnet.load_bace_regression, + 'bbbp': deepchem.molnet.load_bbbp, + 'chembl': deepchem.molnet.load_chembl, + 'clearance': deepchem.molnet.load_clearance, + 'clintox': deepchem.molnet.load_clintox, + 'delaney': deepchem.molnet.load_delaney, + 'factors': deepchem.molnet.load_factors, + 'hiv': deepchem.molnet.load_hiv, + 'hopv': deepchem.molnet.load_hopv, + 'hppb': deepchem.molnet.load_hppb, + 'kaggle': deepchem.molnet.load_kaggle, + 'kinase': deepchem.molnet.load_kinase, + 'lipo': deepchem.molnet.load_lipo, + 'muv': deepchem.molnet.load_muv, + 'nci': deepchem.molnet.load_nci, + 'pcba': deepchem.molnet.load_pcba, + 'pcba_146': deepchem.molnet.load_pcba_146, + 'pcba_2475': deepchem.molnet.load_pcba_2475, + 'pdbbind': deepchem.molnet.load_pdbbind_grid, + 'ppb': deepchem.molnet.load_ppb, + 'qm7': deepchem.molnet.load_qm7_from_mat, + 'qm7b': deepchem.molnet.load_qm7b_from_mat, + 'qm8': deepchem.molnet.load_qm8, + 'qm9': deepchem.molnet.load_qm9, + 'sampl': deepchem.molnet.load_sampl, + 'sider': deepchem.molnet.load_sider, + 'thermosol': deepchem.molnet.load_thermosol, + 'tox21': deepchem.molnet.load_tox21, + 'toxcast': deepchem.molnet.load_toxcast, + 'uv': deepchem.molnet.load_uv, + } + + print('-------------------------------------') + print('Benchmark on dataset: %s' % dataset) + print('-------------------------------------') # loading datasets if split is not None: - logger.info('Splitting function: %s' % split) + print('Splitting function: %s' % split) tasks, all_dataset, transformers = loading_functions[dataset]( featurizer=featurizer, split=split, reload=reload) else: @@ -179,7 +173,8 @@ def run_benchmark(datasets, valid_dataset, transformers, metric, - use_max=use_max, + direction=direction, + n_features=n_features, n_tasks=len(tasks), max_iter=max_iter, search_range=search_range) @@ -192,6 +187,7 @@ def run_benchmark(datasets, test_dataset, tasks, transformers, + n_features, metric, model, test=test, @@ -239,3 +235,108 @@ def run_benchmark(datasets, if hyper_param_search: with open(os.path.join(out_path, dataset + model + '.pkl'), 'w') as f: pickle.dump(hyper_parameters, f) + + +# +# Note by @XericZephyr. Reason why I spun off this function: +# 1. Some model needs dataset information. +# 2. It offers us possibility to **cache** the dataset +# if the featurizer runs very slow, e.g., GraphConv. +# 2+. The cache can even happen at Travis CI to accelerate +# CI testing. +# +def load_dataset(dataset, featurizer, split='random'): + """ + Load specific dataset for benchmark. 
+ + Parameters + ---------- + dataset: string + choice of which datasets to use, should be: tox21, muv, sider, + toxcast, pcba, delaney, factors, hiv, hopv, kaggle, kinase, nci, + clintox, hiv, pcba_128, pcba_146, pdbbind, chembl, qm7, qm7b, qm9, + sampl, uv + featurizer: string or dc.feat.Featurizer. + choice of featurization. + split: string, optional (default=None) + choice of splitter function, None = using the default splitter + """ + dataset_loading_functions = { + 'bace_c': deepchem.molnet.load_bace_classification, + 'bace_r': deepchem.molnet.load_bace_regression, + 'bbbp': deepchem.molnet.load_bbbp, + 'chembl': deepchem.molnet.load_chembl, + 'clearance': deepchem.molnet.load_clearance, + 'clintox': deepchem.molnet.load_clintox, + 'delaney': deepchem.molnet.load_delaney, + 'factors': deepchem.molnet.load_factors, + 'hiv': deepchem.molnet.load_hiv, + 'hopv': deepchem.molnet.load_hopv, + 'hppb': deepchem.molnet.load_hppb, + 'kaggle': deepchem.molnet.load_kaggle, + 'kinase': deepchem.molnet.load_kinase, + 'lipo': deepchem.molnet.load_lipo, + 'muv': deepchem.molnet.load_muv, + 'nci': deepchem.molnet.load_nci, + 'pcba': deepchem.molnet.load_pcba, + 'pcba_128': deepchem.molnet.load_pcba_128, + 'pcba_146': deepchem.molnet.load_pcba_146, + 'pcba_2475': deepchem.molnet.load_pcba_2475, + 'pdbbind': deepchem.molnet.load_pdbbind_grid, + 'ppb': deepchem.molnet.load_ppb, + 'qm7': deepchem.molnet.load_qm7_from_mat, + 'qm7b': deepchem.molnet.load_qm7b_from_mat, + 'qm8': deepchem.molnet.load_qm8, + 'qm9': deepchem.molnet.load_qm9, + 'sampl': deepchem.molnet.load_sampl, + 'sider': deepchem.molnet.load_sider, + 'thermosol': deepchem.molnet.load_thermosol, + 'tox21': deepchem.molnet.load_tox21, + 'toxcast': deepchem.molnet.load_toxcast, + 'uv': deepchem.molnet.load_uv + } + print('-------------------------------------') + print('Loading dataset: %s' % dataset) + print('-------------------------------------') + # loading datasets + if split is not None: + print('Splitting function: %s' % split) + tasks, all_dataset, transformers = dataset_loading_functions[dataset]( + featurizer=featurizer, split=split) + return tasks, all_dataset, transformers + + +def benchmark_model(model, all_dataset, transformers, metric, test=False): + """ + Benchmark custom model. + + model: user-defined model stucture + For user define model, it should include function: fit, evaluate. + + all_dataset: (train, test, val) data tuple. + Returned by `load_dataset` function. + + transformers + + metric: string + choice of evaluation metrics. 
+ + + """ + time_start_fitting = time.time() + train_score = .0 + valid_score = .0 + test_score = .0 + + train_dataset, valid_dataset, test_dataset = all_dataset + + model.fit(train_dataset) + train_score = model.evaluate(train_dataset, metric, transformers) + valid_score = model.evaluate(valid_dataset, metric, transformers) + if test: + test_score = model.evaluate(test_dataset, metric, transformers) + + time_finish_fitting = time.time() + time_for_running = time_finish_fitting - time_start_fitting + + return train_score, valid_score, test_score, time_for_running diff --git a/deepchem/molnet/run_benchmark_models.py b/deepchem/molnet/run_benchmark_models.py index c208611b48..37c80a0669 100644 --- a/deepchem/molnet/run_benchmark_models.py +++ b/deepchem/molnet/run_benchmark_models.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- """ Created on Mon Mar 6 23:41:26 2017 diff --git a/docs/featurizers.rst b/docs/featurizers.rst index 8fbe4fc966..384f7cfa4f 100644 --- a/docs/featurizers.rst +++ b/docs/featurizers.rst @@ -22,25 +22,6 @@ nevertheless, deep learning systems can't simply chew up raw files. For this reason, :code:`deepchem` provides an extensive collection of featurization methods which we will review on this page. -Featurizer-Model Matchups -------------------------- - -If you're using DeepChem in practical applications, you probably want -to use a given model on some dataset. Your first question when you try -to do this will probably be which featurizer should I use? - -+------------+--------------------------+-----------+ -| Model | Acceptable Featurizers | Header 3 | -+============+==========================+===========+ -| body row 1 | column 2 | column 3 | -+------------+------------+-----------+ -| body row 2 | Cells may span columns.| -+------------+------------+-----------+ -| body row 3 | Cells may | - Cells | -+------------+ span rows. | - contain | -| body row 4 | | - blocks. | -+------------+------------+-----------+ - Featurizer ---------- diff --git a/docs/moleculenet.rst b/docs/moleculenet.rst index 1871c78d59..cf241caf94 100644 --- a/docs/moleculenet.rst +++ b/docs/moleculenet.rst @@ -2,141 +2,121 @@ MoleculeNet =========== The DeepChem library is packaged alongside the MoleculeNet suite of datasets. One of the most important parts of machine learning applications is finding a suitable dataset. The MoleculeNet suite has curated a whole range of datasets and loaded them into DeepChem :code:`dc.data.Dataset` objects for convenience. -Running Benchmark ------------------ - -At present, there is only support for running benchmark models - -.. autofunction:: deepchem.molnet.run_benchmark - -Best Known Hyperparameters --------------------------- - -MoleculeNet maintains a list of the currently best known -hyperparameters for various models on MoleculeNet benchmarks. - -MoleculeNet Datasets --------------------- - -MoleculeNet is actively maintained and contains a growing set of -different datasets. Here are the set of currently available -MoleculeNet datasets. - BACE Dataset -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_bace_classification .. autofunction:: deepchem.molnet.load_bace_regression BBBC Datasets -^^^^^^^^^^^^^ +------------- .. autofunction:: deepchem.molnet.load_bbbc001 .. autofunction:: deepchem.molnet.load_bbbc002 BBBP Datasets -^^^^^^^^^^^^^ +------------- BBBP stands for Blood-Brain-Barrier Penetration .. autofunction:: deepchem.molnet.load_bbbp Cell Counting Datasets -^^^^^^^^^^^^^^^^^^^^^^ +---------------------- .. 
autofunction:: deepchem.molnet.load_cell_counting Chembl Datasets -^^^^^^^^^^^^^^^ +--------------- .. autofunction:: deepchem.molnet.load_chembl Chembl25 Datasets -^^^^^^^^^^^^^^^ +--------------- .. autofunction:: deepchem.molnet.load_chembl25 Clearance Datasets -^^^^^^^^^^^^^^^^^^ +------------------ .. autofunction:: deepchem.molnet.load_clearance Clintox Datasets -^^^^^^^^^^^^^^^^ +---------------- .. autofunction:: deepchem.molnet.load_clintox Delaney Datasets -^^^^^^^^^^^^^^^^ +---------------- .. autofunction:: deepchem.molnet.load_delaney Factors Datasets -^^^^^^^^^^^^^^^^ +---------------- .. autofunction:: deepchem.molnet.load_factors HIV Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_hiv HOPV Datasets -^^^^^^^^^^^^^ +------------- HOPV stands for the Harvard Organic Photovoltaic Dataset. .. autofunction:: deepchem.molnet.load_hopv HPPB Datasets -^^^^^^^^^^^^^ +------------- .. autofunction:: deepchem.molnet.load_hppb KAGGLE Datasets -^^^^^^^^^^^^^^^ +--------------- .. autofunction:: deepchem.molnet.load_kaggle Kinase Datasets -^^^^^^^^^^^^^^^ +--------------- .. autofunction:: deepchem.molnet.load_kinase Lipo Datasets -^^^^^^^^^^^^^ +------------- .. autofunction:: deepchem.molnet.load_lipo MUV Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_muv NCI Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_nci PCBA Datasets -^^^^^^^^^^^^^ +------------- .. autofunction:: deepchem.molnet.load_pcba PDBBIND Datasets -^^^^^^^^^^^^^^^^ +---------------- .. autofunction:: deepchem.molnet.load_pdbbind PPB Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_ppb QM7 Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_qm7 @@ -145,54 +125,54 @@ QM7 Datasets .. autofunction:: deepchem.molnet.load_qm7b_from_mat QM8 Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_qm8 QM9 Datasets -^^^^^^^^^^^^ +------------ .. autofunction:: deepchem.molnet.load_qm9 SAMPL Datasets -^^^^^^^^^^^^^^ +-------------- .. autofunction:: deepchem.molnet.load_sampl SIDER Datasets -^^^^^^^^^^^^^^ +-------------- .. autofunction:: deepchem.molnet.load_sider SWEETLEAD Datasets -^^^^^^^^^^^^^^^^^^ +------------------ .. autofunction:: deepchem.molnet.load_sweetlead Thermosol Datasets -^^^^^^^^^^^^^^^^^^ +------------------ .. autofunction:: deepchem.molnet.load_thermosol Tox21 Datasets -^^^^^^^^^^^^^^ +-------------- .. autofunction:: deepchem.molnet.load_tox21 Toxcast Datasets -^^^^^^^^^^^^^^^^ +---------------- .. autofunction:: deepchem.molnet.load_toxcast USPTO Datasets -^^^^^^^^^^^^^^ +-------------- .. autofunction:: deepchem.molnet.load_uspto UV Datasets -^^^^^^^^^^^ +----------- .. autofunction:: deepchem.molnet.load_uv From 3301605dc9758c5aef4fdd8bd66005efac091612 Mon Sep 17 00:00:00 2001 From: Bharath Ramsundar Date: Thu, 2 Jul 2020 16:52:08 -0700 Subject: [PATCH 23/23] Change --- deepchem/molnet/preset_hyper_parameters.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/deepchem/molnet/preset_hyper_parameters.py b/deepchem/molnet/preset_hyper_parameters.py index 571d6943b2..fe2fcbee85 100644 --- a/deepchem/molnet/preset_hyper_parameters.py +++ b/deepchem/molnet/preset_hyper_parameters.py @@ -1,5 +1,9 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- """ -This file holds the current best set of hyperparameters for the Molnet benchmark. +Created on Tue Mar 7 00:07:10 2017 + +@author: zqwu """ import deepchem
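

The end state of this series is easiest to see as a short, self-contained usage script. The sketch below is inferred from the unit tests and the examples/hyperparam_opt script touched above; the synthetic dataset, the single `n_estimators` search parameter, and the way the builder pops `model_dir` out of its keyword arguments are illustrative assumptions rather than fixed API, and `GaussianProcessHyperparamOpt` may additionally require the optional pyGPGO dependency to be installed.

    import numpy as np
    import sklearn.ensemble
    import deepchem as dc

    def rf_model_builder(**model_params):
        # The optimizer injects `model_dir` into the keyword arguments, so pull
        # it out before handing the remaining parameters to sklearn (assumption
        # based on the constructor convention used in this patch series).
        model_dir = model_params.pop("model_dir", None)
        sklearn_model = sklearn.ensemble.RandomForestRegressor(**model_params)
        return dc.models.SklearnModel(sklearn_model, model_dir)

    # Small random regression dataset, mirroring the unit tests in this series.
    train = dc.data.NumpyDataset(X=np.random.rand(100, 5), y=np.random.rand(100, 1))
    valid = dc.data.NumpyDataset(X=np.random.rand(20, 5), y=np.random.rand(20, 1))

    optimizer = dc.hyper.GaussianProcessHyperparamOpt(rf_model_builder)
    metric = dc.metrics.Metric(dc.metrics.pearson_r2_score)
    transformers = []

    # `params_dict` holds initial values; each parameter is searched over
    # [value / search_range, value * search_range].
    best_model, best_hyperparams, all_results = optimizer.hyperparam_search(
        {"n_estimators": 10},
        train,
        valid,
        transformers,
        metric,
        max_iter=2,
        search_range=2)

    print(best_hyperparams)
    print(best_model.evaluate(valid, [metric], transformers))

When benchmarking MoleculeNet models, `params_dict` can instead be seeded from `deepchem.molnet.preset_hyper_parameters.hps`, which maps model-name strings to their current best-known hyperparameter settings; the exact key set varies between DeepChem versions.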