Fix code style
Yi Wang committed May 26, 2017
1 parent 7d0355c commit c59f6ae
Showing 9 changed files with 30 additions and 29 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -3,7 +3,7 @@
   hooks:
     - id: remove-crlf
       files: (?!.*third_party)^.*$ | (?!.*book)^.*$
-- repo: https://github.com/reyoung/mirrors-yapf.git
+- repo: https://github.com/pre-commit/mirrors-yapf.git
   sha: v0.13.2
   hooks:
     - id: yapf
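The only substantive change in this commit is the hook source above: the yapf mirror moves from a personal fork (reyoung) to the pre-commit organization's repository, still pinned at v0.13.2. Every other hunk below is the output of re-running that yapf hook. As an illustration only (not part of the commit), the same formatting can be reproduced from Python through yapf's public API; this is a minimal sketch that assumes yapf is installed, and note that recent yapf releases return a (formatted_text, changed) tuple from FormatCode:

    # Sketch: reformat a snippet the way the pre-commit yapf hook would.
    # Assumes `pip install yapf`; style details differ slightly across versions.
    from yapf.yapflib.yapf_api import FormatCode

    source = "Inputs(*[l.name    for l in layers])\n"
    formatted, changed = FormatCode(source)  # (reformatted text, whether it changed)
    print(formatted)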
2 changes: 1 addition & 1 deletion paddle/function/BufferArgTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "BufferArg.h"
 #include <gtest/gtest.h>
+#include "BufferArg.h"
 #include "paddle/math/MemoryHandle.h"

 namespace paddle {
2 changes: 1 addition & 1 deletion paddle/function/FunctionTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "Function.h"
 #include <gtest/gtest.h>
+#include "Function.h"
 #include "paddle/math/SparseMatrix.h"

 namespace paddle {
2 changes: 1 addition & 1 deletion paddle/function/TensorShapeTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "TensorShape.h"
 #include <gtest/gtest.h>
+#include "TensorShape.h"

 namespace paddle {

2 changes: 1 addition & 1 deletion paddle/function/TensorTypeTest.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "TensorType.h"
 #include <gtest/gtest.h>
+#include "TensorType.h"

 namespace paddle {

@@ -15,7 +15,6 @@
 import copy
 import paddle.trainer.config_parser as config_parser
 from paddle.proto.TrainerConfig_pb2 import OptimizationConfig
-
 '''
 This file is a wrapper of formal config_parser. The main idea of this file is to
 separate different config logic into different function, such as network configuration
4 changes: 2 additions & 2 deletions python/paddle/trainer_config_helpers/networks.py
@@ -1404,7 +1404,7 @@ def inputs(layers, *args):
     if len(args) != 0:
         layers.extend(args)

-    Inputs(*[l.name for l in layers])
+    Inputs(* [l.name for l in layers])


 def outputs(layers, *args):
@@ -1447,7 +1447,7 @@ def __dfs_travel__(layer,
     assert len(layers) > 0

     if HasInputsSet():  # input already set
-        Outputs(*[l.name for l in layers])
+        Outputs(* [l.name for l in layers])
         return  # just return outputs.

     if len(layers) != 1:
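These two hunks are pure formatting: yapf v0.13.2 prints a space between the unpacking `*` and a list-comprehension argument. Both spellings parse identically; the `*` simply unpacks the list of layer names into positional arguments. As an illustration only (not part of the commit; `Inputs` below is a hypothetical stand-in for the config helper):

    # Sketch: `f(*[...])` and `f(* [...])` are the same call to Python.
    class Layer(object):
        def __init__(self, name):
            self.name = name

    def Inputs(*names):  # hypothetical stand-in; just collects its arguments
        return names

    layers = [Layer('word'), Layer('label')]
    assert Inputs(*[l.name for l in layers]) == ('word', 'label')
    assert Inputs(* [l.name for l in layers]) == ('word', 'label')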
40 changes: 20 additions & 20 deletions python/paddle/v2/layer.py
@@ -40,6 +40,7 @@
 __all__ = ['data', 'parse_network']
 __layer_map__ = {}

+
 def __wrap__(f):
     def wrapped(*args, **xargs):
         out = f(*args, **xargs)
@@ -53,6 +54,7 @@ def wrapped(*args, **xargs):

     return wrapped

+
 def __need_to_keep__(name):
     if name in ['StaticInput', 'LayerType', 'layer_support']:
         return False
@@ -99,6 +101,7 @@ def __data_layer__(name, type, **kwargs):
     l.data_type = type
     return l

+
 data = __wrap__(__data_layer__)

 LayerV2 = v1_layers.LayerOutput
@@ -107,6 +110,7 @@ def __data_layer__(name, type, **kwargs):
 def __get_used_layers__(output_layers, extra_layers=None):
     layer_names = set()
     parents = {}
+
     def add_parent(child, parent):
         if child in parents:
             parents[child].append(parent)
@@ -181,28 +185,25 @@ def __get_used_evaluators__(layer_names):
     return evaluator_names


-def __trim_submodel__(old_submodel,
-                      layer_names,
-                      input_layer_names,
-                      output_layer_names,
-                      evaluator_names):
+def __trim_submodel__(old_submodel, layer_names, input_layer_names,
+                      output_layer_names, evaluator_names):

     submodel = SubModelConfig()
     submodel.name = old_submodel.name
-    submodel.layer_names.extend(filter(lambda x: x in layer_names,
-                                       old_submodel.layer_names))
-    submodel.input_layer_names.extend(filter(lambda x: x in input_layer_names,
-                                             submodel.layer_names))
-    submodel.output_layer_names.extend(filter(lambda x: x in output_layer_names,
-                                              submodel.layer_names))
-    submodel.evaluator_names.extend(filter(lambda x: x in evaluator_names,
-                                           old_submodel.evaluator_names))
+    submodel.layer_names.extend(
+        filter(lambda x: x in layer_names, old_submodel.layer_names))
+    submodel.input_layer_names.extend(
+        filter(lambda x: x in input_layer_names, submodel.layer_names))
+    submodel.output_layer_names.extend(
+        filter(lambda x: x in output_layer_names, submodel.layer_names))
+    submodel.evaluator_names.extend(
+        filter(lambda x: x in evaluator_names, old_submodel.evaluator_names))

     submodel.is_recurrent_layer_group = old_submodel.is_recurrent_layer_group
     submodel.reversed = old_submodel.reversed

-    submodel.memories.extend(filter(lambda x: x.link_name in layer_names,
-                                    old_submodel.memories))
+    submodel.memories.extend(
+        filter(lambda x: x.link_name in layer_names, old_submodel.memories))
     target_inlinkid = (old_submodel.target_inlinkid
                        if old_submodel.HasField('target_inlinkid') else -1)
     in_links = []
@@ -213,8 +214,8 @@ def __trim_submodel__(old_submodel,
                target_inlinkid = len(in_links) - 1
     submodel.in_links.extend(in_links)

-    submodel.out_links.extend(filter(lambda x: x.link_name in layer_names,
-                                     old_submodel.out_links))
+    submodel.out_links.extend(
+        filter(lambda x: x.link_name in layer_names, old_submodel.out_links))
     if old_submodel.HasField('generator'):
         submodel.generator.CopyFrom(old_submodel.generator)

@@ -264,9 +265,8 @@ def parse_network(output_layers, extra_layers=None):

     for s in cp.g_config.model_config.sub_models:
         if s.name in submodel_names:
-            s = __trim_submodel__(
-                s, layer_names, input_layer_names, output_layer_names,
-                evaluator_names)
+            s = __trim_submodel__(s, layer_names, input_layer_names,
+                                  output_layer_names, evaluator_names)
             model_config.sub_models.extend([s])

     return model_config
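The __trim_submodel__ hunks above are again behavior-preserving reflows: yapf moves each filter(...) call onto its own continuation line instead of aligning arguments under the opening parenthesis. As an illustration only (not part of the commit; the names below are made up), note that this code targets Python 2, where filter() returns a plain list, which is why .extend(filter(...)) works directly:

    # Sketch: both layouts build the same filtered list.
    layer_names = set(['fc_1', 'cost'])
    old_layer_names = ['data', 'fc_1', 'dropout', 'cost']

    kept = filter(lambda x: x in layer_names, old_layer_names)
    assert list(kept) == ['fc_1', 'cost']  # list() also covers Python 3, where filter() is lazy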
4 changes: 3 additions & 1 deletion python/paddle/v2/tests/test_rnn_layer.py
@@ -32,6 +32,7 @@ def test_simple_rnn(self):

         def parse_old_rnn():
             reset_parser()
+
             def step(y):
                 mem = conf_helps.memory(name="rnn_state", size=hidden_dim)
                 out = conf_helps.fc_layer(
@@ -52,6 +53,7 @@ def test():

         def parse_new_rnn():
             reset_parser()
+
             def new_step(y):
                 mem = layer.memory(name="rnn_state", size=hidden_dim)
                 out = layer.fc(input=[y, mem],
@@ -72,7 +74,6 @@ def new_step(y):
                                     parse_new_rnn().splitlines(1))
         print ''.join(diff)

-
     def test_sequence_rnn_multi_input(self):
         dict_dim = 10
         word_dim = 8
@@ -81,6 +82,7 @@ def test_sequence_rnn_multi_input(self):

         def parse_old_rnn():
             reset_parser()
+
             def test():
                 data = conf_helps.data_layer(name="word", size=dict_dim)
                 label = conf_helps.data_layer(name="label", size=label_dim)
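The recurring edit in layer.py and this test file is the same yapf rule: a blank line is inserted between a statement and a nested def that follows it. As an illustration only (not part of the commit; the body is a made-up stand-in, since the real code calls into Paddle's config parser):

    # Sketch: yapf's preferred layout for a nested function definition.
    def parse_old_rnn():
        calls = []  # stand-in for reset_parser() and friends

        def step(y):  # the blank line above is what this commit adds
            calls.append(y)
            return y

        return step

    step_fn = parse_old_rnn()
    assert step_fn(3) == 3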
