-
-
Notifications
You must be signed in to change notification settings - Fork 1k
/
_common_blocks.py
69 lines (56 loc) · 2.08 KB
/
_common_blocks.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from keras_applications import get_submodules_from_kwargs
def Conv2dBn(
        filters,
        kernel_size,
        strides=(1, 1),
        padding='valid',
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        kernel_initializer='glorot_uniform',
        bias_initializer='zeros',
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        use_batchnorm=False,
        **kwargs
):
    """Build a Conv2D -> (optional) BatchNormalization -> (optional) Activation chain.

    Returns a callable that applies the chain to an input tensor. All
    Conv2D-style arguments are forwarded to ``layers.Conv2D``; a ``name``
    keyword, if given, seeds the ``_conv``/``_bn``/<activation> sub-layer
    names. Remaining ``kwargs`` select the keras submodules (backend/layers).
    """
    # Pull the block name out before the submodule lookup consumes kwargs.
    block_name = kwargs.pop('name', None)
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    # Derive per-layer names only when a block name was supplied.
    conv_name = None if block_name is None else block_name + '_conv'
    bn_name = None
    act_name = None
    if block_name is not None:
        if use_batchnorm:
            bn_name = block_name + '_bn'
        if activation is not None:
            # Callable activations contribute their function name; anything
            # else (e.g. a string like 'relu') is stringified as-is.
            act_str = activation.__name__ if callable(activation) else str(activation)
            act_name = block_name + '_' + act_str

    # Channel axis for batchnorm: last for channels_last, 1 for channels_first.
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    def wrapper(input_tensor):
        # Convolution without its own activation; that is applied after BN.
        x = layers.Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=None,
            use_bias=not use_batchnorm,  # BN's beta makes a conv bias redundant
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            name=conv_name,
        )(input_tensor)

        if use_batchnorm:
            x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)

        if activation:
            x = layers.Activation(activation, name=act_name)(x)

        return x

    return wrapper