Enable drop in average and max layer #1027

Merged
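The change itself is mechanical: each affected layer constructor drops its lone device=None parameter and instead forwards arbitrary keyword arguments (**xargs) to LayerBase, so attributes such as drop_rate (dropout) and device now reach layers like MaxLayer and AverageLayer instead of being silently discarded. Below is a minimal sketch of that forwarding pattern; the simplified LayerBase and AverageLayer are stand-ins for illustration, not the real config_parser classes.

# Minimal sketch of the **xargs forwarding pattern (simplified stand-in
# classes, not the actual PaddlePaddle config_parser code).


class LayerBase(object):
    def __init__(self, name, layer_type, size, inputs,
                 active_type='linear', device=None, drop_rate=0.0):
        self.name = name
        self.layer_type = layer_type
        self.size = size
        self.inputs = inputs
        self.active_type = active_type
        self.device = device
        self.drop_rate = drop_rate  # dropout setting now reaches every subclass


class AverageLayer(LayerBase):
    # Before this PR: __init__(..., device=None) accepted only device and
    # dropped everything else. After: **xargs forwards drop_rate, device, etc.
    def __init__(self, name, inputs, active_type='linear', **xargs):
        super(AverageLayer, self).__init__(
            name, 'average', 0, inputs=inputs, active_type=active_type, **xargs)


# With the forwarding in place, dropout can be configured on an average layer:
avg = AverageLayer('avg', inputs=['seq_input'], drop_rate=0.5)
print(avg.drop_rate)  # 0.5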
python/paddle/trainer/config_parser.py: 110 changes (40 additions & 70 deletions)
@@ -1803,9 +1803,8 @@ class ConvTransLayer(ConvTransLayerBase):

 @config_layer('norm')
 class NormLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(NormLayer, self).__init__(
-            name, 'norm', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             norm_conf = self.config.inputs[input_index].norm_conf
@@ -1817,9 +1816,8 @@ def __init__(self, name, inputs, device=None):

 @config_layer('pool')
 class PoolLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
-        super(PoolLayer, self).__init__(
-            name, 'pool', 0, inputs=inputs, device=device)
+    def __init__(self, name, inputs, **xargs):
+        super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, **xargs)
         for input_index in xrange(len(self.inputs)):
             input_layer = self.get_input_layer(input_index)
             pool_conf = self.config.inputs[input_index].pool_conf
@@ -1851,7 +1849,6 @@ def __init__(self,
                  inputs,
                  active_type="linear",
                  bias=True,
-                 device=None,
                  use_global_stats=True,
                  moving_average_fraction=0.9,
                  batch_norm_type=None,
@@ -1893,7 +1890,6 @@ def __init__(self,
             0,
             active_type=active_type,
             inputs=inputs,
-            device=device,
             **xargs)

         if use_global_stats is not None:
@@ -1927,9 +1923,9 @@ def calc_parameter_size(self, image_conf):

 @config_layer('trans')
 class TransLayer(LayerBase):
-    def __init__(self, name, inputs, device=None):
+    def __init__(self, name, inputs, **xargs):
         super(TransLayer, self).__init__(
-            name, 'trans', 0, inputs=inputs, device=device)
+            name, 'trans', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'TransLayer must have one and only one input')
@@ -1938,9 +1934,9 @@ def __init__(self, name, inputs, device=None):

 @config_layer('resize')
 class ResizeLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None):
+    def __init__(self, name, size, inputs, **xargs):
         super(ResizeLayer, self).__init__(
-            name, 'resize', size=size, inputs=inputs, device=device)
+            name, 'resize', size=size, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 1,
             'ResizeLayer must have one and only one input')
@@ -2265,14 +2261,9 @@ def Generator(

 @config_layer('expand')
 class ExpandLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 trans_type='non-seq',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, trans_type='non-seq', bias=False, **xargs):
         super(ExpandLayer, self).__init__(
-            name, 'expand', 0, inputs=inputs, device=device)
+            name, 'expand', 0, inputs=inputs, **xargs)
         config_assert(
             len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs')
         self.config.trans_type = trans_type
@@ -2303,11 +2294,10 @@ def __init__(self,
                  inputs,
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
                  bias=False,
-                 output_max_index=None):
-        super(MaxLayer, self).__init__(
-            name, 'max', 0, inputs=inputs, device=device)
+                 output_max_index=None,
+                 **xargs):
+        super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input')
         self.config.trans_type = trans_type
         self.config.active_type = active_type
@@ -2354,15 +2344,15 @@ def __init__(self,
                  inputs,
                  active_type='linear',
                  trans_type='non-seq',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 **xargs):
         super(SequenceLastInstanceLayer, self).__init__(
             name,
             'seqlastins',
             0,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input')
         self.config.trans_type = trans_type
@@ -2374,39 +2364,34 @@ def __init__(self,

 @config_layer('seqfirstins')
 class SequenceFirstInstanceLayer(SequenceLastInstanceLayer):
-    def __init__(
-            self,
-            name,
-            inputs,
-            active_type='linear',
-            trans_type='non-seq',
-            device=None,
-            bias=False, ):
+    def __init__(self,
+                 name,
+                 inputs,
+                 active_type='linear',
+                 trans_type='non-seq',
+                 bias=False,
+                 **xargs):
         super(SequenceFirstInstanceLayer, self).__init__(
             name,
             inputs=inputs,
             active_type=active_type,
-            device=device,
-            bias=bias)
+            bias=bias,
+            **xargs)
         self.config.trans_type = trans_type
         self.config.select_first = True


 @config_layer('seqconcat')
 class SequenceConcatLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SequenceConcatLayer, self).__init__(
             name,
             'seqconcat',
             0,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs')
         for input_index in xrange(len(self.inputs)):
@@ -2422,15 +2407,15 @@ def __init__(self,
                  size,
                  inputs,
                  active_type='linear',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 **xargs):
         super(SequenceReshapeLayer, self).__init__(
             name,
             'seqreshape',
             size,
             inputs=inputs,
-            device=device,
-            active_type=active_type)
+            active_type=active_type,
+            **xargs)
         config_assert(
             len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs')
         self.set_layer_size(size)
@@ -2439,19 +2424,9 @@ def __init__(self,

 @config_layer('subseq')
 class SubSequenceLayer(LayerBase):
-    def __init__(self,
-                 name,
-                 inputs,
-                 active_type='linear',
-                 device=None,
-                 bias=False):
+    def __init__(self, name, inputs, active_type='linear', bias=False, **xargs):
         super(SubSequenceLayer, self).__init__(
-            name,
-            'subseq',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type)
+            name, 'subseq', 0, inputs=inputs, active_type=active_type, **xargs)
         config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs')
         input_layer0 = self.get_input_layer(0)
         size = input_layer0.size
@@ -2608,15 +2583,10 @@ def __init__(self,
                  average_strategy='average',
                  trans_type='non-seq',
                  active_type='linear',
-                 device=None,
-                 bias=False):
+                 bias=False,
+                 **xargs):
         super(AverageLayer, self).__init__(
-            name,
-            'average',
-            0,
-            inputs=inputs,
-            device=device,
-            active_type=active_type)
+            name, 'average', 0, inputs=inputs, active_type=active_type, **xargs)
         self.config.average_strategy = average_strategy
         self.config.trans_type = trans_type
         config_assert(len(inputs) == 1, 'AverageLayer must have 1 input')
@@ -2640,9 +2610,9 @@ def __init__(self, name, inputs, cos_scale=5, device=None):

 @config_layer('tensor')
 class TensorLayer(LayerBase):
-    def __init__(self, name, size, inputs, device=None, bias=True, **xargs):
+    def __init__(self, name, size, inputs, bias=True, **xargs):
         super(TensorLayer, self).__init__(
-            name, 'tensor', size, inputs=inputs, device=device, **xargs)
+            name, 'tensor', size, inputs=inputs, **xargs)
         config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs')
         config_assert(size > 0, 'size must be positive')
         config_assert(inputs[1].parameter_name == None,