Commit 4d1cd29

Merge 7792f51 into 766139b
2 parents 766139b + 7792f51
lilleswing committed Nov 30, 2017

Showing 3 changed files with 24 additions and 17 deletions.
26 changes: 13 additions & 13 deletions deepchem/models/tensorgraph/models/graph_models.py
@@ -12,7 +12,7 @@
     DTNNEmbedding, DTNNStep, DTNNGather, DAGLayer, \
     DAGGather, DTNNExtract, MessagePassing, SetGather
 from deepchem.models.tensorgraph.graph_layers import WeaveLayerFactory
-from deepchem.models.tensorgraph.layers import Dense, Concat, SoftMax, \
+from deepchem.models.tensorgraph.layers import Dense, SoftMax, \
     SoftMaxCrossEntropy, GraphConv, BatchNorm, \
     GraphPool, GraphGather, WeightedError, Dropout, BatchNormalization, Stack, Flatten, GraphCNN, GraphCNNPool
 from deepchem.models.tensorgraph.layers import L2Loss, Label, Weights, Feature
@@ -116,7 +116,7 @@ def build_graph(self):
         cost = L2Loss(in_layers=[label, regression])
         costs.append(cost)
     if self.mode == "classification":
-      all_cost = Concat(in_layers=costs, axis=1)
+      all_cost = Stack(in_layers=costs, axis=1)
     elif self.mode == "regression":
       all_cost = Stack(in_layers=costs, axis=1)
     self.weights = Weights(shape=(None, self.n_tasks))
@@ -174,8 +174,8 @@ def default_generator(self,
           atom_feat.append(mol.get_atom_features())
           # pair features
           pair_feat.append(
-              np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
-                                                   self.n_pair_feat)))
+              np.reshape(mol.get_pair_features(),
+                         (n_atoms * n_atoms, self.n_pair_feat)))
 
         feed_dict[self.atom_features] = np.concatenate(atom_feat, axis=0)
         feed_dict[self.pair_features] = np.concatenate(pair_feat, axis=0)
@@ -310,8 +310,8 @@ def default_generator(self,
       num_atoms = list(map(sum, X_b.astype(bool)[:, :, 0]))
       atom_number = [
           np.round(
-              np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]), 1 /
-                       2.4)).astype(int) for i in range(len(num_atoms))
+              np.power(2 * np.diag(X_b[i, :num_atoms[i], :num_atoms[i]]),
+                       1 / 2.4)).astype(int) for i in range(len(num_atoms))
       ]
       start = 0
       for im, molecule in enumerate(atom_number):
@@ -425,7 +425,7 @@ def build_graph(self):
         cost = L2Loss(in_layers=[label, regression])
         costs.append(cost)
     if self.mode == "classification":
-      all_cost = Concat(in_layers=costs, axis=1)
+      all_cost = Stack(in_layers=costs, axis=1)
     elif self.mode == "regression":
       all_cost = Stack(in_layers=costs, axis=1)
     self.weights = Weights(shape=(None, self.n_tasks))
@@ -570,7 +570,7 @@ def build_graph(self):
         cost = L2Loss(in_layers=[label, regression])
         costs.append(cost)
     if self.mode == "classification":
-      entropy = Concat(in_layers=costs, axis=-1)
+      entropy = Stack(in_layers=costs, axis=-1)
     elif self.mode == "regression":
       entropy = Stack(in_layers=costs, axis=1)
     self.my_task_weights = Weights(shape=(None, self.n_tasks))
@@ -716,7 +716,7 @@ def build_graph(self):
         cost = L2Loss(in_layers=[label, regression])
         costs.append(cost)
     if self.mode == "classification":
-      entropy = Concat(in_layers=costs, axis=-1)
+      entropy = Stack(in_layers=costs, axis=1)
     elif self.mode == "regression":
       entropy = Stack(in_layers=costs, axis=1)
     self.my_task_weights = Weights(shape=(None, self.n_tasks))
@@ -994,7 +994,7 @@ def build_graph(self):
         cost = L2Loss(in_layers=[label, regression])
         costs.append(cost)
     if self.mode == "classification":
-      all_cost = Concat(in_layers=costs, axis=1)
+      all_cost = Stack(in_layers=costs, axis=1)
     elif self.mode == "regression":
       all_cost = Stack(in_layers=costs, axis=1)
     self.weights = Weights(shape=(None, self.n_tasks))
@@ -1051,8 +1051,8 @@ def default_generator(self,
           atom_feat.append(mol.get_atom_features())
           # pair features
           pair_feat.append(
-              np.reshape(mol.get_pair_features(), (n_atoms * n_atoms,
-                                                   self.n_pair_feat)))
+              np.reshape(mol.get_pair_features(),
+                         (n_atoms * n_atoms, self.n_pair_feat)))
 
         feed_dict[self.atom_features] = np.concatenate(atom_feat, axis=0)
         feed_dict[self.pair_features] = np.concatenate(pair_feat, axis=0)
@@ -1097,4 +1097,4 @@ def predict_proba_on_generator(self, generator, transformers=[]):
     return np.concatenate(results, axis=0)
 
   def predict_on_generator(self, generator, transformers=[]):
-    return self.predict_proba_on_generator(generator, transformers)
+    return self.predict_proba_on_generator(generator, transformers)
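Note: the substantive change throughout this file is the swap of Concat for Stack wherever the per-task costs are combined; the other hunks are whitespace-only reformatting, and the textually identical -/+ pair in the final hunk is evidently a newline-at-end-of-file fix. The shape difference between the two operations is easiest to see with a NumPy sketch (an analogy only: per-task cost tensors of shape (batch, 1) are assumed here, and DeepChem's actual layer classes are not used):

import numpy as np

batch = 4
# Hypothetical per-task cost tensors, one per task, each of shape (batch, 1).
costs = [np.random.rand(batch, 1) for _ in range(2)]

# Concat merges the tensors along an existing axis: shape (4, 2).
concatenated = np.concatenate(costs, axis=1)

# Stack inserts a new task axis instead: shape (4, 2, 1).
stacked = np.stack(costs, axis=1)

print(concatenated.shape)  # (4, 2)
print(stacked.shape)       # (4, 2, 1)

Stacking keeps a distinct per-task slot, which lines up with the Weights(shape=(None, self.n_tasks)) placeholder declared immediately after each changed line; concatenation would instead fold the task costs into an existing axis of the cost tensors.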
14 changes: 10 additions & 4 deletions deepchem/models/tensorgraph/models/test_graph_models.py
@@ -11,21 +11,27 @@
 
 class TestGraphModels(unittest.TestCase):
 
-  def get_dataset(self, mode='classification', featurizer='GraphConv'):
+  def get_dataset(self,
+                  mode='classification',
+                  featurizer='GraphConv',
+                  num_tasks=2):
     data_points = 10
     tasks, all_dataset, transformers = load_delaney(featurizer)
     train, valid, test = all_dataset
+    for i in range(1, num_tasks):
+      tasks.append("random_task")
+    w = np.ones(shape=(data_points, len(tasks)))
 
     if mode == 'classification':
       y = np.random.randint(0, 2, size=(data_points, len(tasks)))
       metric = deepchem.metrics.Metric(
           deepchem.metrics.roc_auc_score, np.mean, mode="classification")
-    if mode == 'regression':
+    else:
       y = np.random.normal(size=(data_points, len(tasks)))
       metric = deepchem.metrics.Metric(
           deepchem.metrics.mean_absolute_error, mode="regression")
 
-    ds = NumpyDataset(train.X[:10], y, train.w[:10], train.ids[:10])
+    ds = NumpyDataset(train.X[:10], y, w, train.ids[:10])
 
     return tasks, ds, transformers, metric
 
@@ -61,7 +67,7 @@ def test_graph_conv_regression_model(self):
 
   def test_graph_conv_error_bars(self):
     tasks, dataset, transformers, metric = self.get_dataset(
-        'regression', 'GraphConv')
+        'regression', 'GraphConv', num_tasks=1)
 
     batch_size = 50
     model = GraphConvTensorGraph(
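Note: the helper now pads the single-task Delaney task list to the requested width and builds uniform weights to match. A standalone mock of the label/weight construction (illustrative values only; load_delaney and the real dataset are not touched here):

import numpy as np

data_points = 10
num_tasks = 2  # the helper's new default
tasks = ['measured log solubility in mols per litre']  # Delaney's lone task
for _ in range(1, num_tasks):
  tasks.append('random_task')  # pad the task list to the requested width

w = np.ones(shape=(data_points, len(tasks)))  # uniform per-task weights
y = np.random.randint(0, 2, size=(data_points, len(tasks)))  # fake labels

print(len(tasks), y.shape, w.shape)  # 2 (10, 2) (10, 2)

test_graph_conv_error_bars passes num_tasks=1 to keep its original single-task setup now that the default is two tasks.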
1 change: 1 addition & 0 deletions deepchem/models/tensorgraph/tests/test_layers.py
@@ -280,6 +280,7 @@ def test_log(self):
     with self.test_session() as sess:
       result = Log()(value).eval()
       assert np.array_equal(np.log(value), result)
+      assert np.all(np.isclose(np.log(value), result, atol=0.001))
 
   def test_exp(self):
     """Test that Exp can be invoked."""
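Note: the added assertion also checks the result against np.log within an absolute tolerance, which is the robust comparison when an op's kernel computes at a different floating-point precision than NumPy. A minimal sketch of the failure mode the tolerance guards against (simulated float32 kernel; values are illustrative):

import numpy as np

value = np.array([1.0, 2.0, 3.0])

# Simulate an op whose kernel computes the log in float32.
result = np.log(value.astype(np.float32)).astype(np.float64)

print(np.array_equal(np.log(value), result))                  # False here
print(np.all(np.isclose(np.log(value), result, atol=0.001)))  # True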
