Commit DAG_reload_fix
hsjang001205 committed Oct 16, 2020
1 parent 51a76d9 commit ba3455b
Showing 2 changed files with 305 additions and 301 deletions.
207 changes: 111 additions & 96 deletions deepchem/models/layers.py
@@ -72,8 +72,8 @@ def call(self, inputs):
     # Shape (N_atoms, M_nbrs, ndim)
     nbr_coords = tf.gather(coords, nbr_list)
     # Shape (N_atoms, M_nbrs, ndim)
-    tiled_coords = tf.tile(
-        tf.reshape(coords, (N_atoms, 1, ndim)), (1, M_nbrs, 1))
+    tiled_coords = tf.tile(tf.reshape(coords, (N_atoms, 1, ndim)),
+                           (1, M_nbrs, 1))
     # Shape (N_atoms, M_nbrs)
     return tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=2)
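
For reference, a standalone sketch (shapes assumed) of what this tile/reshape computes: each atom's own coordinates are broadcast against its gathered neighbor coordinates, giving a matrix of squared distances.

    import tensorflow as tf

    N_atoms, M_nbrs, ndim = 5, 3, 3
    coords = tf.random.uniform((N_atoms, ndim))
    nbr_list = tf.random.uniform((N_atoms, M_nbrs), maxval=N_atoms, dtype=tf.int32)

    # Each atom's neighbor coordinates: (N_atoms, M_nbrs, ndim)
    nbr_coords = tf.gather(coords, nbr_list)
    # Repeat each atom's own coordinates once per neighbor
    tiled_coords = tf.tile(tf.reshape(coords, (N_atoms, 1, ndim)), (1, M_nbrs, 1))
    # Squared distance from each atom to each neighbor: (N_atoms, M_nbrs)
    sq_dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=2)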

@@ -126,18 +126,16 @@ def build(self, input_shape):
     # Generate the nb_affine weights and biases
     num_deg = 2 * self.max_degree + (1 - self.min_degree)
     self.W_list = [
-        self.add_weight(
-            name='kernel',
-            shape=(int(input_shape[0][-1]), self.out_channel),
-            initializer='glorot_uniform',
-            trainable=True) for k in range(num_deg)
+        self.add_weight(name='kernel',
+                        shape=(int(input_shape[0][-1]), self.out_channel),
+                        initializer='glorot_uniform',
+                        trainable=True) for k in range(num_deg)
     ]
     self.b_list = [
-        self.add_weight(
-            name='bias',
-            shape=(self.out_channel,),
-            initializer='zeros',
-            trainable=True) for k in range(num_deg)
+        self.add_weight(name='bias',
+                        shape=(self.out_channel,),
+                        initializer='zeros',
+                        trainable=True) for k in range(num_deg)
     ]
     self.built = True

@@ -432,10 +430,10 @@ def build(self, input_shape):
     self.W = init((self.input_dim, 4 * self.output_dim))
     self.U = inner_init((self.output_dim, 4 * self.output_dim))

-    self.b = tf.Variable(
-        np.hstack((np.zeros(self.output_dim), np.ones(self.output_dim),
-                   np.zeros(self.output_dim), np.zeros(self.output_dim))),
-        dtype=tf.float32)
+    self.b = tf.Variable(np.hstack(
+        (np.zeros(self.output_dim), np.ones(self.output_dim),
+         np.zeros(self.output_dim), np.zeros(self.output_dim))),
+                         dtype=tf.float32)
     self.built = True

   def call(self, inputs):
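
One detail worth noting in the hunk above: the bias is built from four blocks, with ones in the second block. Assuming the common input/forget/cell/output gate ordering (which this diff does not show), the ones block would be the forget gate, a standard LSTM initialization. A toy illustration:

    import numpy as np

    output_dim = 2
    # Four gate blocks of size output_dim; the ones block (the forget gate,
    # under the assumed ordering) keeps the cell state flowing early in
    # training.
    b = np.hstack((np.zeros(output_dim),   # input gate
                   np.ones(output_dim),    # forget gate
                   np.zeros(output_dim),   # cell candidate
                   np.zeros(output_dim)))  # output gate
    print(b)  # [0. 0. 1. 1. 0. 0. 0. 0.]
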
@@ -817,9 +815,9 @@ def get_config(self):
   def build(self, input_shape):
     init = tf.keras.initializers.RandomNormal(stddev=self.std)
     self.input_weights = [
-        self.add_weight(
-            'weight_%d' % (i + 1), (1,), initializer=init, trainable=True)
-        for i in range(len(input_shape))
+        self.add_weight('weight_%d' % (i + 1), (1,),
+                        initializer=init,
+                        trainable=True) for i in range(len(input_shape))
     ]
     self.built = True

@@ -870,8 +868,10 @@ def call(self, inputs, training=True):
     mean_parent, std_parent = inputs[0], inputs[1]
     noise_scale = tf.cast(training or not self.training_only, tf.float32)
     from tensorflow.python.ops import array_ops
-    sample_noise = tf.random.normal(
-        array_ops.shape(mean_parent), 0, self.noise_epsilon, dtype=tf.float32)
+    sample_noise = tf.random.normal(array_ops.shape(mean_parent),
+                                    0,
+                                    self.noise_epsilon,
+                                    dtype=tf.float32)
     return mean_parent + noise_scale * std_parent * sample_noise
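
A minimal standalone sketch of this sampling step (variable names assumed): Gaussian noise with stddev noise_epsilon is scaled by the parent standard deviation and added to the parent mean, with noise_scale gating it off outside of training.

    import tensorflow as tf

    mean_parent = tf.zeros((2, 3))
    std_parent = tf.ones((2, 3))
    noise_epsilon = 0.05
    training = True

    noise_scale = tf.cast(training, tf.float32)  # 0.0 disables the noise
    sample_noise = tf.random.normal(tf.shape(mean_parent), 0.0, noise_epsilon)
    sample = mean_parent + noise_scale * std_parent * sample_noise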


@@ -1136,8 +1136,8 @@ def compute_nbr_list(self, coords):
     nbr_coords = [tf.gather(coords, atom_nbrs) for atom_nbrs in nbrs]

     # Add phantom atoms that exist far outside the box
-    coord_padding = tf.cast(
-        tf.fill((self.M_nbrs, self.ndim), 2 * self.stop), tf.float32)
+    coord_padding = tf.cast(tf.fill((self.M_nbrs, self.ndim), 2 * self.stop),
+                            tf.float32)
     padded_nbr_coords = [
         tf.concat([nbr_coord, coord_padding], 0) for nbr_coord in nbr_coords
     ]
@@ -1230,8 +1230,8 @@ def get_closest_atoms(self, coords, cells):
     N_atoms, n_cells, ndim, M_nbrs = (self.N_atoms, self.n_cells, self.ndim,
                                       self.M_nbrs)
     # Tile both cells and coords to form arrays of size (N_atoms*n_cells, ndim)
-    tiled_cells = tf.reshape(
-        tf.tile(cells, (1, N_atoms)), (N_atoms * n_cells, ndim))
+    tiled_cells = tf.reshape(tf.tile(cells, (1, N_atoms)),
+                             (N_atoms * n_cells, ndim))

     # Shape (N_atoms*n_cells, ndim) after tile
     tiled_coords = tf.tile(coords, (n_cells, 1))
@@ -1268,8 +1268,8 @@ def get_cells_for_atoms(self, coords, cells):
     tiled_cells = tf.tile(cells, (N_atoms, 1))

     # Shape (N_atoms*n_cells, 1) after tile
-    tiled_coords = tf.reshape(
-        tf.tile(coords, (1, n_cells)), (n_cells * N_atoms, ndim))
+    tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)),
+                              (n_cells * N_atoms, ndim))
     coords_vec = tf.reduce_sum((tiled_coords - tiled_cells)**2, axis=1)
     coords_norm = tf.reshape(coords_vec, (N_atoms, n_cells))

@@ -1313,8 +1313,8 @@ def get_neighbor_cells(self, cells):
     # Tile cells to form arrays of size (n_cells*n_cells, ndim)
     # Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
     # Tile (a, a, a, b, b, b, etc.)
-    tiled_centers = tf.reshape(
-        tf.tile(cells, (1, n_cells)), (n_cells * n_cells, ndim))
+    tiled_centers = tf.reshape(tf.tile(cells, (1, n_cells)),
+                               (n_cells * n_cells, ndim))
     # Tile (a, b, c, a, b, c, ...)
     tiled_cells = tf.tile(cells, (n_cells, 1))

@@ -1339,9 +1339,8 @@ def get_cells(self):
     start, stop, nbr_cutoff = self.start, self.stop, self.nbr_cutoff
     mesh_args = [tf.range(start, stop, nbr_cutoff) for _ in range(self.ndim)]
     return tf.cast(
-        tf.reshape(
-            tf.transpose(tf.stack(tf.meshgrid(*mesh_args))),
-            (self.n_cells, self.ndim)), tf.float32)
+        tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*mesh_args))),
+                   (self.n_cells, self.ndim)), tf.float32)
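
A small runnable sketch of what get_cells produces (box parameters assumed): a uniform grid of cell corners spanning the box, flattened to shape (n_cells, ndim).

    import tensorflow as tf

    start, stop, nbr_cutoff, ndim = 0.0, 2.0, 1.0, 2
    n_cells = int(((stop - start) / nbr_cutoff)**ndim)  # 4 cells here

    mesh_args = [tf.range(start, stop, nbr_cutoff) for _ in range(ndim)]
    cells = tf.cast(
        tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*mesh_args))),
                   (n_cells, ndim)), tf.float32)
    print(cells.numpy())  # rows: [0, 0], [0, 1], [1, 0], [1, 1]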


class AtomicConvolution(tf.keras.layers.Layer):
@@ -1591,8 +1590,8 @@ def get_config(self):

   def build(self, input_shape):
     n_alphas = 2 * len(input_shape)
-    self.alphas = tf.Variable(
-        tf.random.normal([n_alphas, n_alphas]), name='alphas')
+    self.alphas = tf.Variable(tf.random.normal([n_alphas, n_alphas]),
+                              name='alphas')
     self.built = True

   def call(self, inputs):
@@ -1753,12 +1752,11 @@ def call(self, inputs):
     radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
     angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
                                         coordinates)
-    return tf.concat(
-        [
-            tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
-            angular_sym
-        ],
-        axis=2)
+    return tf.concat([
+        tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
+        angular_sym
+    ],
+                     axis=2)

   def distance_matrix(self, coordinates, flags):
     """ Generate distance matrix """
@@ -1812,9 +1810,9 @@ def radial_symmetry(self, d_cutoff, d, atom_numbers):
     if self.atomic_number_differentiated:
       out_tensors = []
       for atom_type in self.atom_cases:
-        selected_atoms = tf.expand_dims(
-            tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
-            axis=3)
+        selected_atoms = tf.expand_dims(tf.expand_dims(
+            atom_numbers_embedded[:, :, atom_type], axis=1),
+                                        axis=3)
         out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
       return tf.concat(out_tensors, axis=2)
     else:
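
Roughly, the selection above masks the radial terms by element type. A hedged toy version (it assumes atom_numbers_embedded behaves like a one-hot encoding indexed by atomic number, which this diff does not show):

    import tensorflow as tf

    # One-hot "embedding" of atomic numbers: (batch, max_atoms, depth)
    atom_numbers = tf.constant([[1, 6, 8]])  # H, C, O
    embedded = tf.one_hot(atom_numbers, depth=9)

    # 1.0 wherever the neighbor is carbon (atomic number 6)
    carbon_mask = embedded[:, :, 6]
    # Expanding dims lets the mask broadcast against per-pair radial terms,
    # so each output channel sums contributions from one element type only.
    carbon_mask = tf.expand_dims(tf.expand_dims(carbon_mask, axis=1), axis=3)
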
@@ -1868,8 +1866,9 @@ def angular_symmetry(self, d_cutoff, d, atom_numbers, coordinates):
       for atom_type_k in self.atom_cases[id_j:]:
         selected_atoms = tf.stack([atom_numbers_embedded[:, :, atom_type_j]] * max_atoms, axis=2) * \
                          tf.stack([atom_numbers_embedded[:, :, atom_type_k]] * max_atoms, axis=1)
-        selected_atoms = tf.expand_dims(
-            tf.expand_dims(selected_atoms, axis=1), axis=4)
+        selected_atoms = tf.expand_dims(tf.expand_dims(selected_atoms,
+                                                       axis=1),
+                                        axis=4)
         out_tensors.append(
             tf.reduce_sum(out_tensor * selected_atoms, axis=(2, 3)))
       return tf.concat(out_tensors, axis=2)
@@ -1908,12 +1907,10 @@ def get_config(self):

   def build(self, input_shape):
     no_features = int(input_shape[0][-1])
-    self.W = tf.Variable(
-        tf.random.truncated_normal(
-            [no_features, self.num_vertices],
-            stddev=1.0 / np.sqrt(no_features)),
-        name='weights',
-        dtype=tf.float32)
+    self.W = tf.Variable(tf.random.truncated_normal(
+        [no_features, self.num_vertices], stddev=1.0 / np.sqrt(no_features)),
+                         name='weights',
+                         dtype=tf.float32)
     self.b = tf.Variable(tf.constant(0.1), name='bias', dtype=tf.float32)
     self.built = True

@@ -2025,18 +2022,16 @@ def build(self, input_shape):
   def build(self, input_shape):
     no_features = int(input_shape[0][2])
     no_A = int(input_shape[1][2])
-    self.W = tf.Variable(
-        tf.random.truncated_normal(
-            [no_features * no_A, self.num_filters],
-            stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
-        name='weights',
-        dtype=tf.float32)
-    self.W_I = tf.Variable(
-        tf.random.truncated_normal(
-            [no_features, self.num_filters],
-            stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
-        name='weights_I',
-        dtype=tf.float32)
+    self.W = tf.Variable(tf.random.truncated_normal(
+        [no_features * no_A, self.num_filters],
+        stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
+                         name='weights',
+                         dtype=tf.float32)
+    self.W_I = tf.Variable(tf.random.truncated_normal(
+        [no_features, self.num_filters],
+        stddev=np.sqrt(1.0 / (no_features * (no_A + 1) * 1.0))),
+                         name='weights_I',
+                         dtype=tf.float32)
     self.b = tf.Variable(tf.constant(0.1), name='bias', dtype=tf.float32)
     self.built = True

@@ -2420,16 +2415,14 @@ def call(self, inputs: List) -> List:
     # Note that AP_ij and AP_ji share the same self.AP_bn batch
     # normalization
     AP_ij = tf.matmul(
-        tf.reshape(
-            tf.gather(atom_features, atom_to_pair),
-            [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
+        tf.reshape(tf.gather(atom_features, atom_to_pair),
+                   [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
     if self.batch_normalize:
       AP_ij = self.AP_bn(AP_ij)
     AP_ij = activation(AP_ij)
     AP_ji = tf.matmul(
-        tf.reshape(
-            tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
-            [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
+        tf.reshape(tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
+                   [-1, 2 * self.n_atom_input_feat]), self.W_AP) + self.b_AP
     if self.batch_normalize:
       AP_ji = self.AP_bn(AP_ji)
     AP_ji = activation(AP_ji)
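
As a quick illustration (shapes assumed): the gather/reshape pattern above builds one row per atom pair by laying the two endpoint feature vectors side by side, and reversing atom_to_pair yields the same pairs in the opposite direction.

    import tensorflow as tf

    n_atom_input_feat = 4
    atom_features = tf.random.uniform((5, n_atom_input_feat))
    atom_to_pair = tf.constant([[0, 1], [2, 3]])

    # (n_pairs, 2, feat) -> (n_pairs, 2*feat): endpoint features concatenated
    AP_ij_input = tf.reshape(tf.gather(atom_features, atom_to_pair),
                             [-1, 2 * n_atom_input_feat])
    # Same pairs with endpoints swapped (the j -> i direction)
    AP_ji_input = tf.reshape(tf.gather(atom_features, tf.reverse(atom_to_pair, [1])),
                             [-1, 2 * n_atom_input_feat])
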
@@ -2935,22 +2928,33 @@ def build(self, input_shape):
     self.W_list = []
     self.b_list = []
     self.dropouts = []
-    init = initializers.get(self.init)
     prev_layer_size = self.n_inputs
     for layer_size in self.layer_sizes:
-      self.W_list.append(init([prev_layer_size, layer_size]))
-      self.b_list.append(backend.zeros(shape=[
-          layer_size,
-      ]))
+      self.W_list.append(
+          self.add_weight(name='kernel',
+                          shape=(prev_layer_size, layer_size),
+                          initializer=self.init,
+                          trainable=True))
+      self.b_list.append(
+          self.add_weight(name='bias',
+                          shape=(layer_size,),
+                          initializer='zeros',
+                          trainable=True))
       if self.dropout is not None and self.dropout > 0.0:
         self.dropouts.append(Dropout(rate=self.dropout))
       else:
         self.dropouts.append(None)
       prev_layer_size = layer_size
-    self.W_list.append(init([prev_layer_size, self.n_outputs]))
-    self.b_list.append(backend.zeros(shape=[
-        self.n_outputs,
-    ]))
+    self.W_list.append(
+        self.add_weight(name='kernel',
+                        shape=(prev_layer_size, self.n_outputs),
+                        initializer=self.init,
+                        trainable=True))
+    self.b_list.append(
+        self.add_weight(name='bias',
+                        shape=(self.n_outputs,),
+                        initializer='zeros',
+                        trainable=True))
     if self.dropout is not None and self.dropout > 0.0:
       self.dropouts.append(Dropout(rate=self.dropout))
     else:
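
This hunk appears to be the substantive change behind the commit name: the DAG layer previously created its weights by calling an initializer directly and via backend.zeros, which produce values that are not registered with the layer, so Keras checkpointing could not find them and they were lost on reload. self.add_weight creates tracked variables instead. A minimal sketch (not DeepChem code) of why tracking matters:

    import tensorflow as tf

    class Tracked(tf.keras.layers.Layer):

      def build(self, input_shape):
        # add_weight registers the variable with the layer, so it appears
        # in trainable_weights and is saved/restored with checkpoints.
        self.W = self.add_weight(name='kernel',
                                 shape=(int(input_shape[-1]), 4),
                                 initializer='glorot_uniform',
                                 trainable=True)
        self.built = True

      def call(self, inputs):
        return tf.matmul(inputs, self.W)

    layer = Tracked()
    _ = layer(tf.zeros((1, 3)))          # triggers build()
    print(len(layer.trainable_weights))  # 1 -- the variable is tracked
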
@@ -2982,16 +2986,16 @@ def call(self, inputs, training=True):

       # generating index for graph features used in the inputs
       stack1 = tf.reshape(
-          tf.stack(
-              [tf.boolean_mask(tf.range(n_atoms), mask)] * (self.max_atoms - 1),
-              axis=1), [-1])
+          tf.stack([tf.boolean_mask(tf.range(n_atoms), mask)] *
+                   (self.max_atoms - 1),
+                   axis=1), [-1])
       stack2 = tf.reshape(tf.boolean_mask(parents[:, count, 1:], mask), [-1])
       index = tf.stack([stack1, stack2], axis=1)
       # extracting graph features for parents of the target atoms, then flatten
       # shape: (batch_size*max_atoms) * [(max_atoms-1)*n_graph_features]
       batch_graph_features = tf.reshape(
-          tf.gather_nd(graph_features, index),
-          [-1, (self.max_atoms - 1) * self.n_graph_feat])
+          tf.gather_nd(graph_features,
+                       index), [-1, (self.max_atoms - 1) * self.n_graph_feat])

       # concat into the input tensor: (batch_size*max_atoms) * n_inputs
       batch_inputs = tf.concat(
@@ -3068,22 +3072,33 @@ def build(self, input_shape):
     self.W_list = []
     self.b_list = []
     self.dropouts = []
-    init = initializers.get(self.init)
     prev_layer_size = self.n_graph_feat
     for layer_size in self.layer_sizes:
-      self.W_list.append(init([prev_layer_size, layer_size]))
-      self.b_list.append(backend.zeros(shape=[
-          layer_size,
-      ]))
+      self.W_list.append(
+          self.add_weight(name='kernel',
+                          shape=(prev_layer_size, layer_size),
+                          initializer=self.init,
+                          trainable=True))
+      self.b_list.append(
+          self.add_weight(name='bias',
+                          shape=(layer_size,),
+                          initializer='zeros',
+                          trainable=True))
       if self.dropout is not None and self.dropout > 0.0:
         self.dropouts.append(Dropout(rate=self.dropout))
       else:
         self.dropouts.append(None)
       prev_layer_size = layer_size
-    self.W_list.append(init([prev_layer_size, self.n_outputs]))
-    self.b_list.append(backend.zeros(shape=[
-        self.n_outputs,
-    ]))
+    self.W_list.append(
+        self.add_weight(name='kernel',
+                        shape=(prev_layer_size, self.n_outputs),
+                        initializer=self.init,
+                        trainable=True))
+    self.b_list.append(
+        self.add_weight(name='bias',
+                        shape=(self.n_outputs,),
+                        initializer='zeros',
+                        trainable=True))
     if self.dropout is not None and self.dropout > 0.0:
       self.dropouts.append(Dropout(rate=self.dropout))
     else:
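
The same fix is applied to the gather layer here. With both DAG layers creating their variables through add_weight, a model containing them can round-trip through Keras weight saving; a hedged usage sketch (the Dense stand-in and the path are hypothetical, not DeepChem code):

    import tensorflow as tf

    # Stand-in for a model containing the DAG layers above
    model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
    model.save_weights('/tmp/dag_demo.ckpt')  # hypothetical checkpoint path
    model.load_weights('/tmp/dag_demo.ckpt')  # tracked variables restored
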
@@ -3276,10 +3291,10 @@ def get_config(self):
   def build(self, input_shape):
     init = initializers.get(self.init)
     self.U = init((2 * self.n_hidden, 4 * self.n_hidden))
-    self.b = tf.Variable(
-        np.concatenate((np.zeros(self.n_hidden), np.ones(self.n_hidden),
-                        np.zeros(self.n_hidden), np.zeros(self.n_hidden))),
-        dtype=tf.float32)
+    self.b = tf.Variable(np.concatenate(
+        (np.zeros(self.n_hidden), np.ones(self.n_hidden),
+         np.zeros(self.n_hidden), np.zeros(self.n_hidden))),
+                         dtype=tf.float32)
     self.built = True

   def call(self, inputs):
