Skip to content

Commit

Permalink
🐛 Fix NotImplementedError
Browse files Browse the repository at this point in the history
Fix NotImplementedError: Layer has arguments ['self', ...] in init and therefore must override get_config().
  • Loading branch information
laugh12321 committed Aug 26, 2022
1 parent e361598 commit 4996584
Show file tree
Hide file tree
Showing 2 changed files with 124 additions and 86 deletions.
64 changes: 36 additions & 28 deletions model/CBAM_attention3D.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,28 +18,28 @@ class channel_attention(tf.keras.layers.Layer):
Contains the implementation of Convolutional Block Attention Module(CBAM) block.
As described in https://arxiv.org/abs/1807.06521.
"""

def __init__(self, ratio=8, **kwargs):
    """Channel-attention sub-module of CBAM.

    Args:
        ratio: Reduction ratio for the shared dense bottleneck
            (hidden width = channels // ratio).
        **kwargs: Forwarded to ``tf.keras.layers.Layer``.
    """
    # Call the base Layer constructor first, then record config args so
    # get_config() can serialize the layer.
    super(channel_attention, self).__init__(**kwargs)
    self.ratio = ratio

def get_config(self):
    """Return the layer configuration for (de)serialization.

    Required because ``__init__`` takes extra arguments; without this,
    saving the model raises NotImplementedError.
    """
    config = super(channel_attention, self).get_config()
    config.update({'ratio': self.ratio})
    return config

def build(self, input_shape):
    """Create the shared two-layer MLP applied to both pooled descriptors.

    Args:
        input_shape: Shape of the input tensor; the last dimension is the
            channel count used to size the bottleneck.
    """
    channel = input_shape[-1]
    # Bottleneck layer: reduces channels by `ratio` with ReLU.
    self.shared_layer_one = tf.keras.layers.Dense(
        channel // self.ratio,
        activation='relu',
        kernel_initializer='he_normal',
        use_bias=True,
        bias_initializer='zeros'
    )
    # Expansion layer: restores the original channel count (no activation;
    # sigmoid gating is applied later in call()).
    self.shared_layer_two = tf.keras.layers.Dense(
        channel,
        kernel_initializer='he_normal',
        use_bias=True,
        bias_initializer='zeros'
    )
    super(channel_attention, self).build(input_shape)

def compute_output_shape(self, input_shape):
Expand All @@ -48,7 +48,7 @@ def compute_output_shape(self, input_shape):
def call(self, inputs):
channel = inputs.get_shape().as_list()[-1]

avg_pool = tf.keras.layers.GlobalAveragePooling3D()(inputs)
avg_pool = tf.keras.layers.GlobalAveragePooling3D()(inputs)
avg_pool = tf.keras.layers.Reshape((1, 1, 1, channel))(avg_pool)
avg_pool = self.shared_layer_one(avg_pool)
avg_pool = self.shared_layer_two(avg_pool)
Expand All @@ -70,32 +70,41 @@ class spatial_attention(tf.keras.layers.Layer):
Contains the implementation of Convolutional Block Attention Module(CBAM) block.
As described in https://arxiv.org/abs/1807.06521.
"""

def __init__(self, kernel_size=7, **kwargs):
    """Spatial-attention sub-module of CBAM.

    Args:
        kernel_size: Side length of the Conv3D kernel that produces the
            spatial attention map.
        **kwargs: Forwarded to ``tf.keras.layers.Layer``.
    """
    # Base constructor first, then store config args for get_config().
    super(spatial_attention, self).__init__(**kwargs)
    self.kernel_size = kernel_size

def get_config(self):
    """Return the layer configuration for (de)serialization.

    Required because ``__init__`` takes extra arguments; without this,
    saving the model raises NotImplementedError.
    """
    config = super(spatial_attention, self).get_config()
    config.update({'kernel_size': self.kernel_size})
    return config

def build(self, input_shape):
    """Create the Conv3D that maps the pooled feature pair to a gate map.

    The convolution outputs a single channel with sigmoid activation, i.e.
    one attention weight per spatial position.
    """
    self.conv3d = tf.keras.layers.Conv3D(
        filters=1,
        kernel_size=self.kernel_size,
        strides=1,
        padding='same',
        activation='sigmoid',
        kernel_initializer='he_normal',
        use_bias=False
    )
    super(spatial_attention, self).build(input_shape)

def compute_output_shape(self, input_shape):
    # Attention gating is element-wise, so the output shape matches the input.
    return input_shape

def call(self, inputs):
    """Apply spatial attention to ``inputs``.

    Computes channel-wise mean and max maps, concatenates them, convolves
    to a single sigmoid-gated attention map, and multiplies it with the
    input. Assumes a 5-D input (batch, d1, d2, d3, channels) — matches the
    Conv3D usage; confirm against callers.
    """
    # Channel-wise average and max pooling (keepdims so shapes align for concat).
    avg_pool = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.mean(x, axis=-1, keepdims=True)
    )(inputs)
    max_pool = tf.keras.layers.Lambda(
        lambda x: tf.keras.backend.max(x, axis=-1, keepdims=True)
    )(inputs)
    concat = tf.keras.layers.Concatenate(axis=-1)([avg_pool, max_pool])
    feature = self.conv3d(concat)

    # Gate the input by the spatial attention map.
    return tf.keras.layers.multiply([inputs, feature])


Expand All @@ -109,4 +118,3 @@ def cbam_block(feature, ratio=8, kernel_size=7):
feature = spatial_attention(kernel_size=kernel_size)(feature)

return feature

146 changes: 88 additions & 58 deletions model/DANet_attention3D.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,31 +18,38 @@ class Channel_attention(tf.keras.layers.Layer):
Fu, Jun, et al. "Dual attention network for scene segmentation."
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019.
"""
def __init__(
    self,
    gamma_initializer=tf.zeros_initializer(),
    gamma_regularizer=None,
    gamma_constraint=None,
    **kwargs
):
    """DANet channel-attention module constructor.

    Args:
        gamma_initializer: Initializer for the learnable residual scale
            ``gamma``. NOTE(review): the default is a shared instance
            created at def time — harmless for a stateless initializer,
            but worth confirming.
        gamma_regularizer: Optional regularizer for ``gamma``.
        gamma_constraint: Optional constraint for ``gamma``.
        **kwargs: Forwarded to ``tf.keras.layers.Layer``.
    """
    # Base constructor first, then record config args for get_config().
    super(Channel_attention, self).__init__(**kwargs)
    self.gamma_initializer = gamma_initializer
    self.gamma_regularizer = gamma_regularizer
    self.gamma_constraint = gamma_constraint

def get_config(self):
    """Return the layer configuration for (de)serialization.

    Required because ``__init__`` takes extra arguments; without this,
    saving the model raises NotImplementedError.
    NOTE(review): the initializer/regularizer/constraint objects are stored
    as-is; consider ``tf.keras.initializers.serialize`` (and friends) for a
    fully JSON-serializable config — confirm against the save format used.
    """
    config = super(Channel_attention, self).get_config()
    config.update(
        {
            'gamma_initializer': self.gamma_initializer,
            'gamma_regularizer': self.gamma_regularizer,
            'gamma_constraint': self.gamma_constraint
        }
    )
    return config

def build(self, input_shape):
    """Create the learnable scalar ``gamma`` scaling the attention residual."""
    self.gamma = self.add_weight(
        shape=(1, ),
        initializer=self.gamma_initializer,
        name='gamma',
        regularizer=self.gamma_regularizer,
        constraint=self.gamma_constraint
    )
    super(Channel_attention, self).build(input_shape)

def compute_output_shape(self, input_shape):
Expand All @@ -51,15 +58,17 @@ def compute_output_shape(self, input_shape):
def call(self, inputs):
    """Apply channel attention: gamma * (softmax(X Xᵀ) X) + X.

    Flattens the three spatial dims (assumes a static 5-D input shape
    (batch, d1, d2, d3, channels) — ``as_list()`` requires known dims;
    confirm against callers), computes a channel-by-channel affinity via
    batched matmuls, and adds the re-weighted features back residually.
    """
    input_shape = inputs.get_shape().as_list()

    # (batch, d1*d2*d3, channels)
    proj_query = tf.keras.layers.Reshape(
        (input_shape[1] * input_shape[2] * input_shape[3], input_shape[4])
    )(inputs)
    proj_key = tf.keras.backend.permute_dimensions(proj_query, (0, 2, 1))
    # Channel affinity matrix, normalized with softmax.
    energy = tf.keras.backend.batch_dot(proj_query, proj_key)
    attention = tf.keras.activations.softmax(energy)

    outputs = tf.keras.backend.batch_dot(attention, proj_query)
    # Restore the original spatial layout before the residual add.
    outputs = tf.keras.layers.Reshape(
        (input_shape[1], input_shape[2], input_shape[3], input_shape[4])
    )(outputs)
    outputs = self.gamma * outputs + inputs

    return outputs
Expand All @@ -72,64 +81,85 @@ class Position_attention(tf.keras.layers.Layer):
Fu, Jun, et al. "Dual attention network for scene segmentation."
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2019.
"""
def __init__(
    self,
    ratio=8,
    gamma_initializer=tf.zeros_initializer(),
    gamma_regularizer=None,
    gamma_constraint=None,
    **kwargs
):
    """DANet position-attention module constructor.

    Args:
        ratio: Channel reduction ratio for the query/key projections.
        gamma_initializer: Initializer for the learnable residual scale
            ``gamma``. NOTE(review): default is a shared instance created
            at def time — fine for a stateless initializer, but confirm.
        gamma_regularizer: Optional regularizer for ``gamma``.
        gamma_constraint: Optional constraint for ``gamma``.
        **kwargs: Forwarded to ``tf.keras.layers.Layer``.
    """
    # Base constructor first, then record config args for get_config().
    super(Position_attention, self).__init__(**kwargs)
    self.ratio = ratio
    self.gamma_initializer = gamma_initializer
    self.gamma_regularizer = gamma_regularizer
    self.gamma_constraint = gamma_constraint

def get_config(self):
    """Return the layer configuration for (de)serialization.

    Required because ``__init__`` takes extra arguments; without this,
    saving the model raises NotImplementedError.
    NOTE(review): initializer/regularizer/constraint objects are stored
    as-is; consider the ``tf.keras.*.serialize`` helpers for a fully
    JSON-serializable config — confirm against the save format used.
    """
    config = super(Position_attention, self).get_config()
    config.update(
        {
            'ratio': self.ratio,
            'gamma_initializer': self.gamma_initializer,
            'gamma_regularizer': self.gamma_regularizer,
            'gamma_constraint': self.gamma_constraint
        }
    )
    return config

def build(self, input_shape):
    """Create the 1x1x1 query/key/value projections and the ``gamma`` scale.

    Query and key reduce channels by ``ratio``; value keeps the full
    channel count so the attended output matches the input for the
    residual add in call().
    """
    super(Position_attention, self).build(input_shape)
    self.query_conv = tf.keras.layers.Conv3D(
        filters=input_shape[-1] // self.ratio,
        kernel_size=(1, 1, 1),
        use_bias=False,
        kernel_initializer='he_normal'
    )
    self.key_conv = tf.keras.layers.Conv3D(
        filters=input_shape[-1] // self.ratio,
        kernel_size=(1, 1, 1),
        use_bias=False,
        kernel_initializer='he_normal'
    )
    self.value_conv = tf.keras.layers.Conv3D(
        filters=input_shape[-1],
        kernel_size=(1, 1, 1),
        use_bias=False,
        kernel_initializer='he_normal'
    )
    self.gamma = self.add_weight(
        shape=(1, ),
        initializer=self.gamma_initializer,
        name='gamma',
        regularizer=self.gamma_regularizer,
        constraint=self.gamma_constraint
    )

def compute_output_shape(self, input_shape):
    # The residual attention output has the same shape as the input.
    return input_shape

def call(self, inputs):
    """Apply position attention: gamma * (softmax(Kᵀ... Q) V) + X.

    Projects the input to query/key/value with the 1x1x1 convs from
    build(), flattens the three spatial dims (assumes a static 5-D input
    shape — ``as_list()`` requires known dims; confirm against callers),
    computes a position-by-position affinity, and adds the attended value
    back residually.
    """
    input_shape = inputs.get_shape().as_list()

    # (batch, channels//ratio, d1*d2*d3) after the transpose below.
    proj_query = tf.keras.layers.Reshape(
        (input_shape[1] * input_shape[2] * input_shape[3], input_shape[4] // self.ratio)
    )(self.query_conv(inputs))
    proj_query = tf.keras.backend.permute_dimensions(proj_query, (0, 2, 1))
    proj_key = tf.keras.layers.Reshape(
        (input_shape[1] * input_shape[2] * input_shape[3], input_shape[4] // self.ratio)
    )(self.key_conv(inputs))
    # Position affinity matrix, normalized with softmax.
    energy = tf.keras.backend.batch_dot(proj_key, proj_query)
    attention = tf.keras.activations.softmax(energy)

    proj_value = tf.keras.layers.Reshape(
        (input_shape[1] * input_shape[2] * input_shape[3], input_shape[4])
    )(self.value_conv(inputs))

    outputs = tf.keras.backend.batch_dot(attention, proj_value)
    # Restore the original spatial layout before the residual add.
    outputs = tf.keras.layers.Reshape(
        (input_shape[1], input_shape[2], input_shape[3], input_shape[4])
    )(outputs)
    outputs = self.gamma * outputs + inputs

    return outputs

0 comments on commit 4996584

Please sign in to comment.