Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into add_dilation
NHZlX committed Nov 14, 2017
2 parents fbd8a33 + 7345de3 commit f3818bd
Showing 6 changed files with 98 additions and 2 deletions.
1 change: 1 addition & 0 deletions paddle/operators/beam_search_decode_op.cc
@@ -27,6 +27,7 @@ class BeamSearchDecodeOp : public framework::OperatorBase {
void Run(const framework::Scope& scope,
const platform::DeviceContext& dev_ctx) const override {
framework::ExecutionContext ctx(*this, scope, dev_ctx);

const LoDTensorArray* ids = ctx.Input<LoDTensorArray>("Ids");
const LoDTensorArray* scores = ctx.Input<LoDTensorArray>("Scores");
const size_t step_num = ids->size();
2 changes: 1 addition & 1 deletion paddle/operators/l1_norm_op.h
@@ -29,7 +29,7 @@ class L1NormKernel : public framework::OpKernel<T> {
Out->mutable_data<T>(context.GetPlace());

auto x = framework::EigenVector<T>::Flatten(*X);
- auto out = framework::EigenVector<T>::Flatten(*Out);
+ auto out = framework::EigenScalar<T>::From(*Out);
auto place = context.GetEigenDevice<Place>();

out.device(place) = x.abs().sum();
2 changes: 1 addition & 1 deletion paddle/operators/squared_l2_norm_op.h
@@ -29,7 +29,7 @@ class SquaredL2NormKernel : public framework::OpKernel<T> {
Out->mutable_data<T>(context.GetPlace());

auto x = framework::EigenVector<T>::Flatten(*X);
- auto out = framework::EigenVector<T>::Flatten(*Out);
+ auto out = framework::EigenScalar<T>::From(*Out);
auto place = context.GetEigenDevice<Place>();

out.device(place) = x.square().sum();
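Both kernels reduce their input to a single value, so the output is mapped as a rank-0 EigenScalar rather than flattened to a vector. A minimal NumPy sketch of the intended semantics (not part of the commit; purely illustrative):

    import numpy as np

    x = np.array([1.0, -2.0, 3.0], dtype=np.float32)

    # L1NormKernel computes sum(|x|); SquaredL2NormKernel computes sum(x^2).
    # Both results are plain scalars, matching the rank-0 Eigen mapping above.
    l1_norm = np.abs(x).sum()              # 6.0
    squared_l2_norm = np.square(x).sum()   # 14.0
    print(l1_norm, squared_l2_norm)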
3 changes: 3 additions & 0 deletions paddle/pybind/pybind.cc
@@ -42,6 +42,9 @@ limitations under the License. */
#include "paddle/platform/gpu_info.h"
#endif

// disable auto conversion to list in Python
PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);

namespace paddle {
namespace pybind {
static size_t UniqueIntegerGenerator(const std::string &prefix) {
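PYBIND11_MAKE_OPAQUE keeps LoDTensorArray as a bound C++ type instead of letting pybind11 copy-convert it into a Python list, so Python-side mutations reach the underlying array. A minimal sketch of what this enables, assuming a build of this branch and using only calls exercised by the test further below:

    import numpy as np
    import paddle.v2.framework.core as core

    scope = core.Scope()
    place = core.CPUPlace()

    # With the opaque binding, this is a reference to the array held in the
    # scope, not a detached Python list.
    ids = scope.var("ids").get_lod_tensor_array()

    t = core.LoDTensor()
    t.set_lod([[0, 1]])
    t.set(np.array([42], dtype="int64"), place)
    ids.append(t)  # mutates the LoDTensorArray owned by the scope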
17 changes: 17 additions & 0 deletions python/paddle/v2/framework/layers.py
@@ -839,6 +839,23 @@ def batch_norm(input,
return helper.append_activation(batch_norm_out)


def beam_search_decode(ids, scores, main_program=None, startup_program=None):
helper = LayerHelper('beam_search_decode', **locals())
sentence_ids = helper.create_tmp_variable(dtype=ids.data_type)
sentence_scores = helper.create_tmp_variable(dtype=ids.data_type)

helper.append_op(
type="beam_search_decode",
inputs={"Ids": ids,
"Scores": scores},
outputs={
"SentenceIds": sentence_ids,
"SentenceScores": sentence_scores
})

return sentence_ids, sentence_scores


class BlockGuard(object):
"""
BlockGuard class.
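A hypothetical call site for the new layer (not part of the commit; `ids_array` and `scores_array` are placeholder names for LoDTensorArray variables produced by a beam-search decoding loop):

    import paddle.v2.framework.layers as layers

    # Decode the per-step beam-search candidates into whole sentences with scores.
    sentence_ids, sentence_scores = layers.beam_search_decode(
        ids=ids_array, scores=scores_array)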
75 changes: 75 additions & 0 deletions python/paddle/v2/framework/tests/test_beam_search_decode_op.py
@@ -0,0 +1,75 @@
import unittest

import numpy as np
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator


class TestBeamSearchDecodeOp(unittest.TestCase):
def setUp(self):
self.scope = core.Scope()
self.cpu_place = core.CPUPlace()

def append_lod_tensor(self, tensor_array, lod, data):
lod_tensor = core.LoDTensor()
lod_tensor.set_lod(lod)
lod_tensor.set(data, self.cpu_place)
tensor_array.append(lod_tensor)

def test_get_set(self):
ids = self.scope.var("ids").get_lod_tensor_array()
self.append_lod_tensor(
ids, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],
np.array(
[1, 2, 3, 4, 5, 6], dtype="int64"))
self.append_lod_tensor(
ids, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],
np.array(
[0, 1, 2, 3, 4, 5], dtype="int64"))
self.append_lod_tensor(
ids, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],
np.array(
[0, 1, 2, 3, 4], dtype="int64"))

scores = self.scope.var("scores").get_lod_tensor_array()
self.append_lod_tensor(
scores, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],
np.array(
[1, 2, 3, 4, 5, 6], dtype="float32"))
self.append_lod_tensor(
scores, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],
np.array(
[0, 1, 2, 3, 4, 5], dtype="float32"))
self.append_lod_tensor(
scores, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],
np.array(
[0, 1, 2, 3, 4], dtype="float32"))

sentence_ids = self.scope.var("sentence_ids").get_tensor()
sentence_scores = self.scope.var("sentence_scores").get_tensor()

beam_search_decode_op = Operator(
"beam_search_decode",
# inputs
Ids="ids",
Scores="scores",
# outputs
SentenceIds="sentence_ids",
SentenceScores="sentence_scores")

ctx = core.DeviceContext.create(self.cpu_place)
beam_search_decode_op.run(self.scope, ctx)

expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]]
self.assertEqual(sentence_ids.lod(), expected_lod)
self.assertEqual(sentence_scores.lod(), expected_lod)

expected_data = np.array(
[2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], "int64")
self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
self.assertTrue(
np.array_equal(np.array(sentence_scores), expected_data))


if __name__ == '__main__':
unittest.main()
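For reference, the LoD lists in this test are offset vectors: each level marks where one sequence ends and the next begins. A small standalone helper (not part of the commit) showing how such offsets slice the flat data:

    def split_by_offsets(offsets, items):
        # Offsets are cumulative boundaries: [0, 3, 6] -> slices [0:3] and [3:6].
        return [items[offsets[i]:offsets[i + 1]] for i in range(len(offsets) - 1)]

    data = [1, 2, 3, 4, 5, 6]
    word_level = split_by_offsets([0, 1, 2, 3, 4, 5, 6], data)  # one element per entry
    beam_level = split_by_offsets([0, 3, 6], word_level)        # two groups of three
    print(beam_level)  # [[[1], [2], [3]], [[4], [5], [6]]]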
