From 3ca5df0f479db82a39ace4962ede193bc18d9152 Mon Sep 17 00:00:00 2001 From: gangliao Date: Fri, 30 Sep 2016 13:53:23 +0800 Subject: [PATCH 001/180] Fix bug when only support AVX 2 (#150) In some situation, for instance, in the virtual machine, it could happen. --- cmake/FindAVX.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/FindAVX.cmake b/cmake/FindAVX.cmake index 58b89918ec622..f6103c6e667e8 100644 --- a/cmake/FindAVX.cmake +++ b/cmake/FindAVX.cmake @@ -59,7 +59,7 @@ IF(${FIND_AVX_10}) ENDIF() ENDIF() -IF("${FIND_AVX_10}" OR "${FIND_AVX_20}") +IF(${FIND_AVX_10}) SET(AVX_FOUND TRUE) MESSAGE(STATUS "Find CPU supports ${AVX_FLAGS}.") ENDIF() From 0276f15a4537cfc0a58a41a3a9eb695aab5a0fea Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 30 Sep 2016 14:58:02 +0800 Subject: [PATCH 002/180] add scripts to build ubuntu install package. (#132) * also refine install docs, too --- .../install/ubuntu_install.rst | 29 ++++++++++++++- paddle/scripts/deb/build_scripts/.gitignore | 1 + paddle/scripts/deb/build_scripts/Dockerfile | 5 +++ paddle/scripts/deb/build_scripts/build.sh | 37 +++++++++++++++++++ paddle/scripts/deb/build_scripts/build_deb.sh | 8 ++++ 5 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 paddle/scripts/deb/build_scripts/.gitignore create mode 100644 paddle/scripts/deb/build_scripts/Dockerfile create mode 100755 paddle/scripts/deb/build_scripts/build.sh create mode 100755 paddle/scripts/deb/build_scripts/build_deb.sh diff --git a/doc_cn/build_and_install/install/ubuntu_install.rst b/doc_cn/build_and_install/install/ubuntu_install.rst index 7cdd470677eb8..a813d9da2e52d 100644 --- a/doc_cn/build_and_install/install/ubuntu_install.rst +++ b/doc_cn/build_and_install/install/ubuntu_install.rst @@ -16,14 +16,41 @@ https://github.com/baidu/Paddle/releases/tag/V0.8.0b0 .. code-block:: shell - dpkg -i paddle-0.8.0b-cpu.deb + dpkg -i paddle-*-cpu.deb apt-get install -f 在 :code:`dpkg -i` 的时候如果报一些依赖未找到的错误是正常的, 在 :code:`apt-get install -f` 里会继续安装 PaddlePaddle。 + +或者使用下面一条命令安装. + +.. code-block:: shell + + gdebi paddle-*-cpu.deb + +如果 :code:`gdebi` 没有安装,则需要使用 :code:`sudo apt-get install gdebi`, 来安装 :code:`gdebi` + + 需要注意的是,如果使用GPU版本的PaddlePaddle,请安装CUDA 7.5 和CUDNN 5到本地环境中, 并设置好对应的环境变量(LD_LIBRARY_PATH等等)。 +安装完成后,可以使用命令 :code:`paddle version` 查看安装后的paddle 版本。可能的输出为 + +.. 
code-block:: text + + PaddlePaddle 0.8.0b1, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_glog: ON + with_gflags: ON + with_metric_learning: + with_timer: OFF + with_predict_sdk: + + 可能遇到的问题 -------------- diff --git a/paddle/scripts/deb/build_scripts/.gitignore b/paddle/scripts/deb/build_scripts/.gitignore new file mode 100644 index 0000000000000..1521c8b7652b1 --- /dev/null +++ b/paddle/scripts/deb/build_scripts/.gitignore @@ -0,0 +1 @@ +dist diff --git a/paddle/scripts/deb/build_scripts/Dockerfile b/paddle/scripts/deb/build_scripts/Dockerfile new file mode 100644 index 0000000000000..db365a65b7d33 --- /dev/null +++ b/paddle/scripts/deb/build_scripts/Dockerfile @@ -0,0 +1,5 @@ +FROM paddledev/paddle:gpu-latest +MAINTAINER PaddlePaddle Dev Team +COPY build.sh /root/ +CMD cd /root/ && bash build.sh + diff --git a/paddle/scripts/deb/build_scripts/build.sh b/paddle/scripts/deb/build_scripts/build.sh new file mode 100755 index 0000000000000..662d2a9103f7d --- /dev/null +++ b/paddle/scripts/deb/build_scripts/build.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -e +apt-get install -y dh-make +cd ~ +mkdir -p ~/dist/gpu +mkdir -p ~/dist/cpu +mkdir -p ~/dist/cpu-noavx +mkdir -p ~/dist/gpu-noavx +git clone https://github.com/baidu/Paddle.git paddle +cd paddle +mkdir build +cd build +cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON -DWITH_AVX=ON +make -j `nproc` +cpack -D CPACK_GENERATOR='DEB' .. +mv *.deb ~/dist/cpu + +rm -rf * +cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON -DWITH_AVX=ON -DCUDNN_ROOT=/usr/ +make -j `nproc` +cpack -D CPACK_GENERATOR='DEB' .. +mv *.deb ~/dist/gpu + + +rm -rf * +cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON -DWITH_AVX=OFF +make -j `nproc` +cpack -D CPACK_GENERATOR='DEB' .. +mv *.deb ~/dist/cpu-noavx + +rm -rf * +cmake .. -DWITH_GPU=ON -DWITH_SWIG_PY=ON -DWITH_AVX=OFF -DCUDNN_ROOT=/usr/ +make -j `nproc` +cpack -D CPACK_GENERATOR='DEB' .. +mv *.deb ~/dist/gpu-noavx + + diff --git a/paddle/scripts/deb/build_scripts/build_deb.sh b/paddle/scripts/deb/build_scripts/build_deb.sh new file mode 100755 index 0000000000000..1331c1249d5a7 --- /dev/null +++ b/paddle/scripts/deb/build_scripts/build_deb.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -e +docker build -t build_paddle_deb . +rm -rf dist +mkdir -p dist +docker run -v$PWD/dist:/root/dist --name tmp_build_deb_container build_paddle_deb +docker rm tmp_build_deb_container +docker rmi build_paddle_deb From b52039bd11f76142e2cf418e636f933fb91c91a7 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 30 Sep 2016 15:00:32 +0800 Subject: [PATCH 003/180] some bug fix for sparse matrix (#133) * some bug fix for sparse matrix * a minor bug fix --- paddle/cuda/include/hl_sparse.h | 4 ++ paddle/cuda/src/hl_cuda_sparse.cu | 107 ++++++++++------------------- paddle/cuda/src/hl_cuda_sparse.cuh | 48 ++++++++++--- paddle/gserver/layers/Layer.cpp | 27 ++++++-- 4 files changed, 98 insertions(+), 88 deletions(-) diff --git a/paddle/cuda/include/hl_sparse.h b/paddle/cuda/include/hl_sparse.h index 22f7a228e0ad6..9acdebdebf377 100644 --- a/paddle/cuda/include/hl_sparse.h +++ b/paddle/cuda/include/hl_sparse.h @@ -223,6 +223,7 @@ extern void hl_matrix_csc2dense(hl_sparse_matrix_s A_d, * @param[in] dimK width of op(A) & height of op(B) * @param[in] alpha scalar used for multiplication. * @param[in] beta scalar used for multiplication. + * If beta is zero, C does not have to be a valid input. * * @note transb is not support HPPL_OP_T. 
* @@ -251,6 +252,7 @@ extern void hl_matrix_csr_mul_dense(hl_sparse_matrix_s A_d, * @param[in] dimK width of op(A) & height of op(B) * @param[in] alpha scalar used for multiplication. * @param[in] beta scalar used for multiplication. + * If beta is zero, C does not have to be a valid input. * * @note transb is not support HPPL_OP_T. * @@ -275,6 +277,7 @@ extern void hl_matrix_csc_mul_dense(hl_sparse_matrix_s A_d, * @param[in] dimK width of op(A) & height of op(B) * @param[in] alpha scalar used for multiplication. * @param[in] beta scalar used for multiplication. + * If beta is zero, C does not have to be a valid input. * * @note transa is not support HPPL_OP_T. * @@ -327,6 +330,7 @@ extern void hl_sparse_matrix_mul(real* A_d, hl_trans_op_t transa, * @param[in] dimK width of op(A) & height of op(B) * @param[in] alpha scalar used for multiplication. * @param[in] beta scalar used for multiplication. + * If beta is zero, C does not have to be a valid input. * * * @note transa is not support HPPL_OP_T. diff --git a/paddle/cuda/src/hl_cuda_sparse.cu b/paddle/cuda/src/hl_cuda_sparse.cu index b42568afdaaf5..1687fcc221ab8 100644 --- a/paddle/cuda/src/hl_cuda_sparse.cu +++ b/paddle/cuda/src/hl_cuda_sparse.cu @@ -562,6 +562,22 @@ void hl_memcpy_sparse_matrix(hl_sparse_matrix_s dst, } } +/** + * Calculate beta * C, if beta is zero, C does not have to be a valid input. + */ +static void _beta_mul_c(real *c, int dimM, int dimN, real beta) { + if (beta == 0.0) { + hl_gpu_apply_unary_op(unary::Zero(), c, dimM, dimN, dimN); + } else { + if (beta != 1.0){ + hl_gpu_apply_unary_op( + unary::mul_scalar(beta), c, dimM, dimN, dimN); + } + } + + return; +} + void hl_matrix_csr_mul_dense(hl_sparse_matrix_s A_d, hl_trans_op_t transa, real *B_d, hl_trans_op_t transb, real *C_d, @@ -580,15 +596,8 @@ void hl_matrix_csr_mul_dense(hl_sparse_matrix_s A_d, hl_trans_op_t transa, } if (A_d->nnz == 0) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } else { - return; - } + _beta_mul_c(C_d, dimM, dimN, beta); + return; } /* nnz != 0 */ @@ -633,13 +642,7 @@ void hl_matrix_csr_mul_dense(hl_sparse_matrix_s A_d, hl_trans_op_t transa, beta); } } else if (HPPL_OP_T == transa) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } + _beta_mul_c(C_d, dimM, dimN, beta); int blocksX = (dimN + CU_CSC_MUL_DENSE_BLOCK_N - 1) / CU_CSC_MUL_DENSE_BLOCK_N; @@ -699,15 +702,8 @@ void hl_matrix_dense_mul_csc(real *A_d, hl_trans_op_t transa, << "matrix format error!"; if (B_d->nnz == 0) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } else { - return; - } + _beta_mul_c(C_d, dimM, dimN, beta); + return; } /* nnz != 0 */ @@ -750,13 +746,7 @@ void hl_matrix_dense_mul_csc(real *A_d, hl_trans_op_t transa, beta); } } else if (transb == HPPL_OP_T) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } + _beta_mul_c(C_d, dimM, dimN, beta); int blocksX = 1 + (dimK-1)/CU_DM_CSR_THREAD_X; int blocksY = 1 + (dimM-1)/CU_DM_CSR_BLOCK_M; dim3 threads(CU_DM_CSR_THREAD_X, CU_DM_CSR_THREAD_Y); @@ -813,15 +803,8 @@ void hl_matrix_dense_mul_csr(real *A_d, hl_trans_op_t transa, << "matrix format error!"; if (B_d->nnz == 0) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } else { - return; - } + _beta_mul_c(C_d, dimM, dimN, beta); + return; } /* nnz != 0 */ @@ -833,14 +816,7 @@ void 
hl_matrix_dense_mul_csr(real *A_d, hl_trans_op_t transa, } if (transb == HPPL_OP_N) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } - + _beta_mul_c(C_d, dimM, dimN, beta); int blocksX = 1 + (dimK-1)/CU_DM_CSR_THREAD_X; int blocksY = 1 + (dimM-1)/CU_DM_CSR_BLOCK_M; dim3 threads(CU_DM_CSR_THREAD_X, CU_DM_CSR_THREAD_Y); @@ -925,15 +901,8 @@ void hl_matrix_csc_mul_dense(hl_sparse_matrix_s A_d, hl_trans_op_t transa, } if (A_d->nnz == 0) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } else { - return; - } + _beta_mul_c(C_d, dimM, dimN, beta); + return; } /* nnz != 0 */ @@ -945,13 +914,7 @@ void hl_matrix_csc_mul_dense(hl_sparse_matrix_s A_d, hl_trans_op_t transa, } if (HPPL_OP_N == transa) { - if (beta != 1.0) { - hl_gpu_apply_unary_op(unary::mul_scalar(beta), - C_d, - dimM, - dimN, - dimN); - } + _beta_mul_c(C_d, dimM, dimN, beta); int blocksX = (dimN + CU_CSC_MUL_DENSE_BLOCK_N -1)/CU_CSC_MUL_DENSE_BLOCK_N; int blocksY = (dimK + CU_CSC_MUL_DENSE_BLOCK_K -1)/CU_CSC_MUL_DENSE_BLOCK_K; @@ -1113,7 +1076,7 @@ void hl_sparse_matrix_mul(real *A_d, hl_trans_op_t transa, CHECK(!transA) << "Not supported A is trans and B is not trans!"; dim3 block(CU_BLOCK_SIZE, 1); - int avgNnzPerRow = C_d2->nnz_s / dimM; + int avgNnzPerRow = C_d->nnz / dimM; avgNnzPerRow = avgNnzPerRow > 0 ? avgNnzPerRow : 1; int gridx = DIVUP(avgNnzPerRow, CU_BLOCK_SIZE); dim3 grid(gridx, dimM); @@ -1242,9 +1205,9 @@ void hl_matrix_csr_column_sum(real* A_d, hl_sparse_matrix_s B_d, LOG(FATAL) << "parameter B is null!"; } - if (B_d2->nnz_s == 0) return; + if (B_d->nnz == 0) return; - int nnz = B_d2->nnz_s; + int nnz = B_d->nnz; int block = 512; int grid = DIVUP(nnz, 512); KeSMatrixCsrColumnSum<<>>( @@ -1273,9 +1236,9 @@ void hl_matrix_csr_add_bias(hl_sparse_matrix_s A_d, real* B_d, LOG(FATAL) << "parameter A_d is null!"; } - if (A_d2->nnz_s == 0) return; + if (A_d->nnz == 0) return; - int nnz = A_d2->nnz_s; + int nnz = A_d->nnz; int block = 512; int grid = DIVUP(nnz, 512); KeSMatrixCsrAddBias<<>>( @@ -1308,9 +1271,9 @@ void hl_matrix_csr_add_dense(hl_sparse_matrix_s A_d, real* B_d, int dimM, LOG(FATAL) << "parameter A_d is null!"; } - if (A_d2->nnz_s == 0) return; + if (A_d->nnz == 0) return; - int gridX = DIVUP((A_d2->nnz_s / dimM), 512); + int gridX = DIVUP((A_d->nnz / dimM), 512); gridX = gridX > 0 ? 
gridX : 1; dim3 block(512, 1); dim3 grid(gridX, dimM); diff --git a/paddle/cuda/src/hl_cuda_sparse.cuh b/paddle/cuda/src/hl_cuda_sparse.cuh index db5c9ce979885..13e89390d68c2 100644 --- a/paddle/cuda/src/hl_cuda_sparse.cuh +++ b/paddle/cuda/src/hl_cuda_sparse.cuh @@ -85,6 +85,15 @@ __global__ void KeSMatrixCsc2Dense(real * csc_val, C_d[row*dimN + col] = sum; } +__device__ __forceinline__ +void _calculate_c(real &c, real sum) { + c = sum; +} +__device__ __forceinline__ +void _calculate_c(real &c, real sum, real beta) { + c = sum + beta * c; +} + #define CU_CSRMM_N 4 #define CU_CSRMM_THREAD_X 32 #define CU_CSRMM_THREAD_Y 32 @@ -191,11 +200,19 @@ __global__ void KeSMatrixCsrMulDense(real *C_d, } C_d += __mul24(index_m, dimN); - #pragma unroll - for (int n = 0; n < CU_CSRMM_N; n++) { - if (index_n < dimN) { - C_d[index_n] = alpha*sum[n] + beta*C_d[index_n]; - index_n += CU_CSRMM_THREAD_X; + if (beta == 0.0) { + for (int n = 0; n < CU_CSRMM_N; n++) { + if (index_n < dimN) { + _calculate_c(C_d[index_n], alpha * sum[n]); + index_n += CU_CSRMM_THREAD_X; + } + } + } else { + for (int n = 0; n < CU_CSRMM_N; n++) { + if (index_n < dimN) { + _calculate_c(C_d[index_n], alpha * sum[n], beta); + index_n += CU_CSRMM_THREAD_X; + } } } } @@ -544,13 +561,22 @@ TEMP_TEST: int index_m_c = ibx + idy; int index_n_c = blockIdx.y*CU_CSCMM_BLOCK_N_BEST + idx; C_d += index_n_c + __mul24(index_m_c, dimN); - #pragma unroll - for (int m = 0; m < CU_CSCMM_THREAD_M_BEST; m++) { - if (index_m_c < dimM && index_n_c < dimN) { - C_d[0] = A_s[idy+m*32][idx] + beta*C_d[0]; + if (beta == 0.0) { + for (int m = 0; m < CU_CSCMM_THREAD_M_BEST; m++) { + if (index_m_c < dimM && index_n_c < dimN) { + _calculate_c(C_d[0], A_s[idy + m * 32][idx]); + } + index_m_c += 32; + C_d += dimN*32; + } + } else { + for (int m = 0; m < CU_CSCMM_THREAD_M_BEST; m++) { + if (index_m_c < dimM && index_n_c < dimN) { + _calculate_c(C_d[0], A_s[idy + m * 32][idx], beta); + } + index_m_c += 32; + C_d += dimN*32; } - index_m_c += 32; - C_d += dimN*32; } } diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/gserver/layers/Layer.cpp index 44ea95c80ab08..78d15c553021d 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/gserver/layers/Layer.cpp @@ -16,6 +16,7 @@ limitations under the License. 
*/ #include "paddle/utils/Util.h" #include "paddle/utils/Logging.h" +#include "paddle/math/SparseMatrix.h" #include "AddtoLayer.h" #include "CosSimLayer.h" @@ -290,14 +291,30 @@ void Layer::showOutputStats() { << " is 0, skip to show the statistics"; return; } - real mean = out->getSum() / out->getElementCnt(); - MatrixPtr outSquare = out->clone(); - outSquare->copyFrom(*out); + MatrixPtr outSquare; + if (dynamic_cast(out.get())) { + GpuSparseMatrix *tmp = dynamic_cast(out.get()); + outSquare = std::make_shared( + tmp->getHeight(), tmp->getWidth(), tmp->getElementCnt(), + tmp->getValueType(), tmp->getFormat()); + } else { + outSquare = out->clone(); + } + outSquare->copyFrom(*out, HPPL_STREAM_DEFAULT); + hl_stream_synchronize(HPPL_STREAM_DEFAULT); + + real mean = outSquare->getSum() / out->getElementCnt(); + real min; + real max; if (dynamic_cast(outSquare.get())) { auto tmpMat = dynamic_cast(outSquare.get()); + min = tmpMat->getMin(); + max = tmpMat->getMax(); tmpMat->square(); LOG(INFO) << "show statistics of [none zero values] in sparse matrix"; } else { + min = outSquare->getMin(); + max = outSquare->getMax(); outSquare->square(); } real std = (outSquare->getSum() / outSquare->getElementCnt()) - mean * mean; @@ -306,8 +323,8 @@ void Layer::showOutputStats() { << ", " << "std=" << std << ", " - << "min=" << out->getMin() << ", " - << "max=" << out->getMax(); + << "min=" << min << ", " + << "max=" << max; } void Layer::forwardActivation() { From 2a0a5391cae761107dd243cce801b3ad99723f70 Mon Sep 17 00:00:00 2001 From: gangliao Date: Fri, 30 Sep 2016 15:04:51 +0800 Subject: [PATCH 004/180] Update build docs (#149) * Add automatic check AVX in CMake * Add indent in FindAVX.cmake * Revise table format and some words in build docs * Update build docs * Update build docs --- doc/build/build_from_source.md | 67 +++++++++++----------------------- 1 file changed, 21 insertions(+), 46 deletions(-) diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md index c671f483863c7..f7db0a9b92e67 100644 --- a/doc/build/build_from_source.md +++ b/doc/build/build_from_source.md @@ -1,5 +1,5 @@ Installing from Sources -================= +========================== * [1. Download and Setup](#download) * [2. Requirements](#requirements) @@ -28,51 +28,26 @@ To compile the source code, your computer must be equipped with GCC >=4.6 or Cla PaddlePaddle supports some build options. To enable it, first you need to install the related libraries. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +
-| Optional         | Description                                                                   |
-| WITH_GPU         | Compile with GPU mode.                                                        |
-| WITH_DOUBLE      | Compile with double precision floating-point, default: single precision.     |
-| WITH_GLOG        | Compile with glog. If not found, default: an internal log implementation.    |
-| WITH_GFLAGS      | Compile with gflags. If not found, default: an internal flag implementation. |
-| WITH_TESTING     | Compile with gtest for PaddlePaddle's unit testing.                          |
-| WITH_DOC         | Compile to generate PaddlePaddle's docs, default: disabled (OFF)             |
-| WITH_SWIG_PY     | Compile with python predict API, default: disabled (OFF).                    |
-| WITH_STYLE_CHECK | Compile with code style check, default: enabled (ON).                        |
+ + + + + + + + + + + + + + + +
+| Optional         | Description                                                                   |
+| WITH_GPU         | Compile with GPU mode.                                                        |
+| WITH_DOUBLE      | Compile with double precision floating-point, default: single precision.     |
+| WITH_GLOG        | Compile with glog. If not found, default: an internal log implementation.    |
+| WITH_GFLAGS     | Compile with gflags. If not found, default: an internal flag implementation. |
+| WITH_TESTING     | Compile with gtest for PaddlePaddle's unit testing.                          |
+| WITH_DOC         | Compile to generate PaddlePaddle's docs, default: disabled (OFF).            |
+| WITH_SWIG_PY     | Compile with python predict API, default: disabled (OFF).                    |
+| WITH_STYLE_CHECK | Compile with code style check, default: enabled (ON).                        |
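For illustration only, these options are passed to CMake as `-D` flags in an out-of-source build. A minimal sketch is shown below; the option names are the ones listed in the table above, while the particular ON/OFF values are just an example, not a recommendation:

```bash
# Illustrative sketch: configure and build with a few of the options listed above.
# The ON/OFF values chosen here are examples only; adjust them to your environment.
mkdir -p build && cd build
cmake .. -DWITH_GPU=OFF -DWITH_SWIG_PY=ON -DWITH_DOC=OFF -DWITH_STYLE_CHECK=ON
make -j `nproc`
```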
+ **Note:** - The GPU version works best with Cuda Toolkit 7.5 and cuDNN v5. @@ -334,4 +309,4 @@ It may require sudo privileges: sudo pip install /opt/paddle/share/wheels/*.whl # or just run sudo paddle version -``` \ No newline at end of file +``` From b221509695f2181714b3b68fa35c1d200d795a06 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 30 Sep 2016 16:51:49 +0800 Subject: [PATCH 005/180] [DOC CHANGE] Rerange Build docs & emphasize them in README.md (#151) * Rerange Build docs & emphasize them in README.md * Rerange Build docs & emphasize them in README.md --- README.md | 4 +- doc/build/docker_install.md | 91 ------------- doc/build/docker_install.rst | 122 ++++++++++++++++++ doc/build/index.rst | 23 ++-- doc/build/ubuntu_install.md | 21 --- doc/build/ubuntu_install.rst | 25 ++++ doc_cn/build_and_install/index.rst | 25 +++- .../install/docker_install.rst | 47 +++++-- .../install/paddle_version.txt | 11 ++ .../install/ubuntu_install.rst | 42 ++---- 10 files changed, 234 insertions(+), 177 deletions(-) delete mode 100644 doc/build/docker_install.md create mode 100644 doc/build/docker_install.rst delete mode 100644 doc/build/ubuntu_install.md create mode 100644 doc/build/ubuntu_install.rst create mode 100644 doc_cn/build_and_install/install/paddle_version.txt diff --git a/README.md b/README.md index cc2fc68ac3143..05e544186a2d0 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,9 @@ Welcome to the PaddlePaddle GitHub. -The software will be released on Sept. 30 with full documentation and installation support. +Do you wanna try and play PaddlePaddle? Just following the [Install Guide](http://www.paddlepaddle.org/doc/build/index.html) and [Quick Start](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html). The chinese version is [Install Guide](http://www.paddlepaddle.org/doc_cn/build_and_install/index.html) and [Quick Start](http://www.paddlepaddle.org/doc_cn/demo/quick_start/index.html). -A pre-release version is available now for those who are eager to take a look. +Please refer to our [release log](https://github.com/baidu/Paddle/releases) to track the latest feature of PaddlePaddle. PaddlePaddle (PArallel Distributed Deep LEarning) is an easy-to-use, efficient, flexible and scalable deep learning platform, which is originally diff --git a/doc/build/docker_install.md b/doc/build/docker_install.md deleted file mode 100644 index 3cd9d1730a22b..0000000000000 --- a/doc/build/docker_install.md +++ /dev/null @@ -1,91 +0,0 @@ -Docker installation guide -==================== -PaddlePaddle provides some pre-compiled binary, including Docker images, ubuntu deb packages. It is welcomed to contributed more installation package of different linux distribution (such as ubuntu, centos, debian, gentoo and so on). We recommend to use Docker images to deploy PaddlePaddle. -## Docker installation - -Docker is a tool designed to make it easier to create, deploy, and run applications by using containers. - -### PaddlePaddle Docker images -There are six Docker images: - -- paddledev/paddle:cpu-latest: PaddlePaddle CPU binary image. -- paddledev/paddle:gpu-latest: PaddlePaddle GPU binary image. -- paddledev/paddle:cpu-devel-latest: PaddlePaddle CPU binary image plus source code. -- paddledev/paddle:gpu-devel-latest: PaddlePaddle GPU binary image plus source code. 
-- paddledev/paddle:cpu-demo-latest: PaddlePaddle CPU binary image plus source code and demo -- paddledev/paddle:gpu-demo-latest: PaddlePaddle GPU binary image plus source code and demo - -Tags with latest will be replaced by a released version. - -### Download and Run Docker images - -You have to install Docker in your machine which has linux kernel version 3.10+ first. You can refer to the official guide https://docs.docker.com/engine/installation/ for further information. - -You can use ```docker pull ```to download images first, or just launch a container with ```docker run```: -```bash -docker run -it paddledev/paddle:cpu-latest -``` - -If you want to launch container with GPU support, you need to set some environment variables at the same time: - -```bash -export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}" -export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') -docker run -it paddledev/paddle:gpu-latest -``` - -### Notice - -#### Performance - -Since Docker is based on the lightweight virtual containers, the CPU computing performance maintains well. And GPU driver and equipments are all mapped to the container, so the GPU computing performance would not be seriously affected. - -If you use high performance nic, such as RDMA(RoCE 40GbE or IB 56GbE), Ethernet(10GbE), it is recommended to use config "-net = host". - - - - -#### Remote access -If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information. - -Following is a simple Dockerfile with ssh: -```bash -FROM paddledev/paddle - -MAINTAINER PaddlePaddle dev team - -RUN apt-get update -RUN apt-get install -y openssh-server -RUN mkdir /var/run/sshd -RUN echo 'root:root' | chpasswd - -RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config -RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config - -EXPOSE 22 - -CMD ["/usr/sbin/sshd", "-D"] -``` - -Then you can build an image with Dockerfile and launch a container: - -```bash -# cd into Dockerfile directory -docker build . -t paddle_ssh -# run container, and map host machine port 8022 to container port 22 -docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh -``` -Now, you can ssh on port 8022 to access the container, username is root, password is also root: - -```bash -ssh -p 8022 root@YOUR_HOST_MACHINE -``` - - -You can stop and delete the container as following: -```bash -# stop -docker stop paddle_ssh_machine -# delete -docker rm paddle_ssh_machine -``` diff --git a/doc/build/docker_install.rst b/doc/build/docker_install.rst new file mode 100644 index 0000000000000..542b9bac27afb --- /dev/null +++ b/doc/build/docker_install.rst @@ -0,0 +1,122 @@ +Docker installation guide +========================== + +PaddlePaddle provide the `Docker `_ image. `Docker`_ is a lightweight container utilities. The performance of PaddlePaddle in `Docker`_ container is basically as same as run it in a normal linux. The `Docker`_ is a very convenient way to deliver the binary release for linux programs. + +.. 
note:: + + The `Docker`_ image is the recommended way to run PaddlePaddle + +PaddlePaddle Docker images +-------------------------- + +There are 12 `images `_ for PaddlePaddle, and the name is :code:`paddle-dev/paddle`, tags are\: + + ++-----------------+------------------+------------------------+-----------------------+ +| | normal | devel | demo | ++=================+==================+========================+=======================+ +| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ + +And the three columns are: + +* normal\: The docker image only contains binary of PaddlePaddle. +* devel\: The docker image contains PaddlePaddle binary, source code and essential build environment. +* demo\: The docker image contains the dependencies to run PaddlePaddle demo. + +And the four rows are: + +* CPU\: CPU Version. Support CPU which has :code:`AVX` instructions. +* GPU\: GPU Version. Support GPU, and cpu has :code:`AVX` instructions. +* CPU WITHOUT AVX\: CPU Version, which support most CPU even doesn't have :code:`AVX` instructions. +* GPU WITHOUT AVX\: GPU Version, which support most CPU even doesn't have :code:`AVX` instructions. + +User can choose any version depends on machine. The following script can help you to detect your CPU support :code:`AVX` or not. + +.. code-block:: bash + + if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi + +If the output is :code:`Support AVX`, then you can choose the AVX version of PaddlePaddle, otherwise, you need select :code:`noavx` version of PaddlePaddle. For example, the CPU develop version of PaddlePaddle is :code:`paddle-dev/paddle:cpu-devel-latest`. + +The PaddlePaddle images don't contain any entry command. You need to write your entry command to use this image. See :code:`Remote Access` part or just use following command to run a :code:`bash` + +.. code-block:: bash + + docker run -it paddledev/paddle:cpu-latest /bin/bash + + +Download and Run Docker images +------------------------------ + +You have to install Docker in your machine which has linux kernel version 3.10+ first. You can refer to the official guide https://docs.docker.com/engine/installation/ for further information. + +You can use :code:`docker pull ` to download images first, or just launch a container with :code:`docker run` \: + +.. code-block:: bash + + docker run -it paddledev/paddle:cpu-latest + + +If you want to launch container with GPU support, you need to set some environment variables at the same time: + +.. 
code-block:: bash + + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest + + +Some notes for docker +--------------------- + +Performance ++++++++++++ + +Since Docker is based on the lightweight virtual containers, the CPU computing performance maintains well. And GPU driver and equipments are all mapped to the container, so the GPU computing performance would not be seriously affected. + +If you use high performance nic, such as RDMA(RoCE 40GbE or IB 56GbE), Ethernet(10GbE), it is recommended to use config "-net = host". + + + + +Remote access ++++++++++++++ + + +If you want to enable ssh access background, you need to build an image by yourself. Please refer to official guide https://docs.docker.com/engine/reference/builder/ for further information. + +Following is a simple Dockerfile with ssh: + +.. literalinclude:: ../../doc_cn/build_and_install/install/paddle_ssh.Dockerfile + +Then you can build an image with Dockerfile and launch a container: + +.. code-block:: bash + + # cd into Dockerfile directory + docker build . -t paddle_ssh + # run container, and map host machine port 8022 to container port 22 + docker run -d -p 8022:22 --name paddle_ssh_machine paddle_ssh + +Now, you can ssh on port 8022 to access the container, username is root, password is also root: + +.. code-block:: bash + + ssh -p 8022 root@YOUR_HOST_MACHINE + +You can stop and delete the container as following: + +.. code-block:: bash + + # stop + docker stop paddle_ssh_machine + # delete + docker rm paddle_ssh_machine diff --git a/doc/build/index.rst b/doc/build/index.rst index d6d0d19e110fc..511cdea145c7f 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -10,31 +10,24 @@ Install PaddlePaddle install_* internal/install_from_jumbo.md + docker_install.rst + ubuntu_install.rst Build from Source ----------------- -If you want to hack and contribute PaddlePaddle source code, following guides can help you\: +.. warning:: -.. toctree:: - :maxdepth: 1 - :glob: + Please use :code:`deb` package or :code:`docker` image to install paddle. The building guide is used for hacking or contributing to PaddlePaddle. + - build_from_source.md - contribute_to_paddle.md - -Docker and Debian Package installation --------------------------------------- - -Note: The installation packages are still in pre-release -state and your experience of installation may not be smooth. +If you want to hack and contribute PaddlePaddle source code, following guides can help you\: -If you want to pack docker image, the following guide can help you\: .. toctree:: :maxdepth: 1 :glob: - docker_install.md - ubuntu_install.md + build_from_source.md + contribute_to_paddle.md diff --git a/doc/build/ubuntu_install.md b/doc/build/ubuntu_install.md deleted file mode 100644 index c30a8f6db5d9e..0000000000000 --- a/doc/build/ubuntu_install.md +++ /dev/null @@ -1,21 +0,0 @@ -Debian Package installation guide -================================= - -## Debian Package installation -Currently , PaddlePaddle only provides ubuntu14.04 debian packages. -There are two versions package, including CPU and GPU. 
The download address is: - -https://github.com/baidu/Paddle/releases/tag/V0.8.0b0 - - -After downloading PaddlePaddle deb packages, you can run: - -```bash -dpkg -i paddle-0.8.0b-cpu.deb -apt-get install -f -``` -And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences. - -**Note** - -PaddlePaddle package only supports x86 CPU with AVX instructions. If not, you have to download and build from source code. diff --git a/doc/build/ubuntu_install.rst b/doc/build/ubuntu_install.rst new file mode 100644 index 0000000000000..ea8042085bf45 --- /dev/null +++ b/doc/build/ubuntu_install.rst @@ -0,0 +1,25 @@ +Debian Package installation guide +================================= + +PaddlePaddle supports :code:`deb` pacakge. The installation of this :code:`deb` package is tested in ubuntu 14.04, but it should be support other debian based linux, too. + +There are four versions of debian package, :code:`cpu`, :code:`gpu`, :code:`cpu-noavx`, :code:`gpu-noavx`. And :code:`noavx` version is used to support CPU which does not contain :code:`AVX` instructions. The download url of :code:`deb` package is \: https://github.com/baidu/Paddle/releases/ + + +After downloading PaddlePaddle deb packages, you can use :code:`gdebi` install. + +.. code-block:: bash + + gdebi paddle-*.deb + +If :code:`gdebi` is not installed, you can use :code:`sudo apt-get install gdebi` to install it. + +Or you can use following commands to install PaddlePaddle. + +.. code-block:: bash + + dpkg -i paddle-*.deb + apt-get install -f + +And if you use GPU version deb package, you need to install CUDA toolkit and cuDNN, and set related environment variables(such as LD_LIBRARY_PATH) first. It is normal when `dpkg -i` get errors. `apt-get install -f` will continue install paddle, and install dependences. + diff --git a/doc_cn/build_and_install/index.rst b/doc_cn/build_and_install/index.rst index e21fc98c63dcd..2205e282248c4 100644 --- a/doc_cn/build_and_install/index.rst +++ b/doc_cn/build_and_install/index.rst @@ -1,19 +1,32 @@ 编译与安装 ======================== -PaddlePaddle提供数个预编译的二进制来进行安装,包括Docker镜像,ubuntu的deb安装包等。我们推荐使用Docker镜像来部署环境,同时欢迎贡献更多的安装包。 - -Note: The intallation packages are still in pre-release state and your experience of installation may not be smooth. +安装 +++++ -注意:目前PaddlePaddle的安装包还处在pre-release的状态,使用起来或许会不是很顺畅。 +PaddlePaddle提供数个预编译的二进制来进行安装,包括Docker镜像,ubuntu的deb安装包等。我们推荐使用Docker镜像来部署环境,同时欢迎贡献更多的安装包。 .. toctree:: :maxdepth: 1 :glob: - 源码下载(对内) <../build/internal/download_paddle_source_zh_cn.rst> 使用Jumbo安装(对内) <../build/internal/install_from_jumbo.rst> - 从源码编译安装(对内) <../build/internal/build_from_source_zh_cn.rst> install/docker_install.rst install/ubuntu_install.rst + + + +编译 +++++ + +.. warning:: + + 编译选项主要推荐高级用户查看,普通用户请走安装流程。 + +.. 
toctree:: + :maxdepth: 1 + :glob: + + 源码下载(对内) <../build/internal/download_paddle_source_zh_cn.rst> + 从源码编译安装(对内) <../build/internal/build_from_source_zh_cn.rst> cmake/index.rst diff --git a/doc_cn/build_and_install/install/docker_install.rst b/doc_cn/build_and_install/install/docker_install.rst index 4154cb86d8d64..44aa2a0983f4f 100644 --- a/doc_cn/build_and_install/install/docker_install.rst +++ b/doc_cn/build_and_install/install/docker_install.rst @@ -14,20 +14,43 @@ PaddlePaddle提供了Docker的使用镜像。PaddlePaddle推荐使用Docker进 PaddlePaddle提供的Docker镜像版本 -------------------------------- -我们提供了6个Docker image\: +我们提供了12个 `Docker image `_ ,他们的image name都是 :code:`paddle-dev/paddle` ,tag分别为 -* paddledev/paddle\:cpu-latest\: PaddlePaddle的CPU二进制 -* paddledev/paddle\:gpu-latest\: PaddlePaddle的GPU二进制 -* paddledev/paddle\:cpu-devel-latest\: PaddlePaddle的CPU二进制,同时包含CPU开发环境和源码 -* paddledev/paddle\:gpu-devel-latest\: PaddlePaddle的GPU二进制,同时包含GPU开发环境和源码 -* paddledev/paddle\:cpu-demo-latest\: PaddlePaddle的CPU二进制,同时包含CPU开发环境、源码和运行demo的必要依赖 -* paddledev/paddle\:gpu-demo-latest\: PaddlePaddle的GPU二进制,同时包含GPU开发环境、源码和运行demo的必要依赖 ++-----------------+------------------+------------------------+-----------------------+ +| | normal | devel | demo | ++=================+==================+========================+=======================+ +| CPU | cpu-latest | cpu-devel-latest | cpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest | ++-----------------+------------------+------------------------+-----------------------+ +| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ +| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest | ++-----------------+------------------+------------------------+-----------------------+ -同时,不同的稳定版本,会将latest替换成稳定版本的版本号。 +其中,横向包括三个版本,normal,devel和demo。 + +* Normal: 正常的Docker image,只包括paddle的二进制 +* Devel: 包括Paddle的二进制、编译环境和源代码 +* Demo: 包括Paddle运行demo所需要的依赖 + +纵向包括四个版本,他们是。 + +* CPU: CPU版本。需要支持AVX指令集的CPU +* GPU: GPU版本。需要支持AVX指令集的CPU +* CPU WITHOUT AVX: CPU版本,不支持AVX指令集的CPU也可以运行 +* GPU WITHOUT AVX: GPU版本,不需要AVX指令集的CPU也可以运行。 + +用户可以选择对应版本的docker image。使用如下脚本可以确定本机的CPU知否支持 :code:`AVX` 指令集\: + +.. code-block:: bash + + if cat /proc/cpuinfo | grep -q avx ; then echo "Support AVX"; else echo "Not support AVX"; fi + +如果输出 :code:`Support AVX`,则可以选择上表中的AVX版本PaddlePaddle。否则需要选择非AVX的PaddlePaddle。选择普通CPU版本的devel版本的image,则可以使用 :code:`paddle-dev/paddle:cpu-devel-latest` 来引用这个image。 PaddlePaddle提供的镜像并不包含任何命令运行,想要运行PaddlePaddle,您需要进入镜像运行PaddlePaddle -程序或者自定义一个含有启动脚本的image。具体请参考注意事项中的 -`使用ssh访问PaddlePaddle镜像` +程序或者自定义一个含有启动脚本的image。具体请参考注意事项中的 :code:`使用ssh访问PaddlePaddle镜像` 下载和运行Docker镜像 -------------------- @@ -44,7 +67,7 @@ mac osx或者是windows机器,请参考 .. 
code-block:: bash - $ docker run -it paddledev/paddlepaddle:latest-cpu + $ docker run -it paddledev/paddlepaddle:cpu-latest 即可启动和进入PaddlePaddle的container。如果运行GPU版本的PaddlePaddle,则需要先将 cuda相关的Driver和设备映射进container中,脚本类似于 @@ -53,7 +76,7 @@ cuda相关的Driver和设备映射进container中,脚本类似于 $ export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') - $ docker run -it paddledev/paddlepaddle:latest-gpu + $ docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddlepaddle:latest-gpu 进入Docker container后,运行 :code:`paddle version` 即可打印出PaddlePaddle的版本和构建 信息。安装完成的PaddlePaddle主体包括三个部分, :code:`paddle` 脚本, python的 diff --git a/doc_cn/build_and_install/install/paddle_version.txt b/doc_cn/build_and_install/install/paddle_version.txt new file mode 100644 index 0000000000000..7b2bfd2b1b3a9 --- /dev/null +++ b/doc_cn/build_and_install/install/paddle_version.txt @@ -0,0 +1,11 @@ +PaddlePaddle 0.8.0b1, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_glog: ON + with_gflags: ON + with_metric_learning: + with_timer: OFF + with_predict_sdk: \ No newline at end of file diff --git a/doc_cn/build_and_install/install/ubuntu_install.rst b/doc_cn/build_and_install/install/ubuntu_install.rst index a813d9da2e52d..70ac5225bd82e 100644 --- a/doc_cn/build_and_install/install/ubuntu_install.rst +++ b/doc_cn/build_and_install/install/ubuntu_install.rst @@ -1,55 +1,37 @@ 使用deb包在Ubuntu上安装PaddlePaddle =================================== -PaddlePaddle目前支持ubuntu 14.04版本使用deb包安装。更多的安装包PaddlePaddle会在近期提供。 -欢迎大家贡献各个发行版的安装包(例如,ubuntu,centos,debian,gentoo)。 +PaddlePaddle目前支持使用deb包安装。Paddle的 :code:`deb` 安装包在ubuntu 14.04中正确,但理论上支持其他的 debian 发行版。 -PaddlePaddle的ubuntu安装包分为两个版本,即CPU版本,和GPU版本,他们的下载地址是\: -https://github.com/baidu/Paddle/releases/tag/V0.8.0b0 -需要注意的是,目前PaddlePaddle的安装包只支持 -`AVX `_ -指令集的X86 CPU。如果系统使用不支持 `AVX`_ 指令集的CPU运行PaddlePaddle,那么需要从源码 -编译PaddlePaddle,请参考 `编译文档 <../cmake/index.html>`_ 。 +PaddlePaddle的ubuntu安装包分为四个版本,他们是 cpu、gpu、cpu-noavx、gpu-noavx 四个版本。其中 noavx 用于不支持AVX指令集的cpu。安装包的下载地址是\: https://github.com/baidu/Paddle/releases/ -用户需要先将PaddlePaddle安装包下载到本地,然后执行如下命令即可完成安装。 + +用户需要先将PaddlePaddle安装包下载到本地,然后执行如下 :code:`gdebi` 命令即可完成安装。 .. code-block:: shell - dpkg -i paddle-*-cpu.deb - apt-get install -f + gdebi paddle-*-cpu.deb + +如果 :code:`gdebi` 没有安装,则需要使用 :code:`sudo apt-get install gdebi`, 来安装 :code:`gdebi` 。 -在 :code:`dpkg -i` 的时候如果报一些依赖未找到的错误是正常的, -在 :code:`apt-get install -f` 里会继续安装 PaddlePaddle。 或者使用下面一条命令安装. .. code-block:: shell - gdebi paddle-*-cpu.deb - -如果 :code:`gdebi` 没有安装,则需要使用 :code:`sudo apt-get install gdebi`, 来安装 :code:`gdebi` + dpkg -i paddle-*-cpu.deb + apt-get install -f +在 :code:`dpkg -i` 的时候如果报一些依赖未找到的错误是正常的, +在 :code:`apt-get install -f` 里会继续安装 PaddlePaddle。 需要注意的是,如果使用GPU版本的PaddlePaddle,请安装CUDA 7.5 和CUDNN 5到本地环境中, 并设置好对应的环境变量(LD_LIBRARY_PATH等等)。 安装完成后,可以使用命令 :code:`paddle version` 查看安装后的paddle 版本。可能的输出为 -.. code-block:: text - - PaddlePaddle 0.8.0b1, compiled with - with_avx: ON - with_gpu: OFF - with_double: OFF - with_python: ON - with_rdma: OFF - with_glog: ON - with_gflags: ON - with_metric_learning: - with_timer: OFF - with_predict_sdk: - +.. 
literalinclude:: paddle_version.txt 可能遇到的问题 -------------- From 29c16e22420b8b69d17572accd2f6fc0239c8c7e Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 1 Oct 2016 10:55:13 +0800 Subject: [PATCH 006/180] Update Readme (#153) * Update Readme * Update readme * Update readme --- README.md | 69 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index 05e544186a2d0..2162488f2f7de 100644 --- a/README.md +++ b/README.md @@ -1,60 +1,65 @@ # PaddlePaddle -[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle) -Welcome to the PaddlePaddle GitHub. - -Do you wanna try and play PaddlePaddle? Just following the [Install Guide](http://www.paddlepaddle.org/doc/build/index.html) and [Quick Start](http://www.paddlepaddle.org/doc/demo/quick_start/index_en.html). The chinese version is [Install Guide](http://www.paddlepaddle.org/doc_cn/build_and_install/index.html) and [Quick Start](http://www.paddlepaddle.org/doc_cn/demo/quick_start/index.html). +| **`Linux`** | +|----------------| +|[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle)| -Please refer to our [release log](https://github.com/baidu/Paddle/releases) to track the latest feature of PaddlePaddle. +Welcome to the PaddlePaddle GitHub. PaddlePaddle (PArallel Distributed Deep LEarning) is an easy-to-use, efficient, flexible and scalable deep learning platform, which is originally developed by Baidu scientists and engineers for the purpose of applying deep learning to many products at Baidu. +Our vision is to enable deep learning for everyone via PaddlePaddle. +Please refer to our [release log](https://github.com/baidu/Paddle/releases) to track the latest feature of PaddlePaddle. + ## Features - **Flexibility** - PaddlePaddle supports a wide range of neural network architectures and - optimization algorithms. It is easy to configure complex models such as - neural machine translation model with attention mechanism or complex memory - connection. + PaddlePaddle supports a wide range of neural network architectures and + optimization algorithms. It is easy to configure complex models such as + neural machine translation model with attention mechanism or complex memory + connection. - **Efficiency** - In order to unleash the power of heterogeneous computing resource, - optimization occurs at different levels of PaddlePaddle, including - computing, memory, architecture and communication. The following are some - examples: - 1. Optimized math operations through SSE/AVX intrinsics, BLAS libraries - (e.g. MKL, ATLAS, cuBLAS) or customized CPU/GPU kernels. - 2. Highly optimized recurrent networks which can handle **variable-length** - sequence without padding. - 3. Optimized local and distributed training for models with high dimensional - sparse data. + In order to unleash the power of heterogeneous computing resource, + optimization occurs at different levels of PaddlePaddle, including + computing, memory, architecture and communication. The following are some + examples: + + - Optimized math operations through SSE/AVX intrinsics, BLAS libraries + (e.g. MKL, ATLAS, cuBLAS) or customized CPU/GPU kernels. + - Highly optimized recurrent networks which can handle **variable-length** + sequence without padding. + - Optimized local and distributed training for models with high dimensional + sparse data. 
- **Scalability** - With PaddlePaddle, it is easy to use many CPUs/GPUs and machines to speed - up your training. PaddlePaddle can achieve high throughput and performance - via optimized communication. + With PaddlePaddle, it is easy to use many CPUs/GPUs and machines to speed + up your training. PaddlePaddle can achieve high throughput and performance + via optimized communication. - **Connected to Products** - In addition, PaddlePaddle is also designed to be easily deployable. At Baidu, - PaddlePaddle has been deployed into products or service with a vast number - of users, including ad click-through rate (CTR) prediction, large-scale image - classification, optical character recognition(OCR), search ranking, computer - virus detection, recommendation, etc. It is widely utilized in products at - Baidu and it has achieved a significant impact. We hope you can also exploit - the capability of PaddlePaddle to make a huge impact for your product. + In addition, PaddlePaddle is also designed to be easily deployable. At Baidu, + PaddlePaddle has been deployed into products or service with a vast number + of users, including ad click-through rate (CTR) prediction, large-scale image + classification, optical character recognition(OCR), search ranking, computer + virus detection, recommendation, etc. It is widely utilized in products at + Baidu and it has achieved a significant impact. We hope you can also exploit + the capability of PaddlePaddle to make a huge impact for your product. ## Installation -See [Installation Guide](http://paddlepaddle.org/doc/build/) to install from pre-built package or build from the source code. (Note: The installation packages are still in pre-release state and your experience of installation may not be smooth.). - +Check out the [Install Guide](http://paddlepaddle.org/doc/build/) to install from +pre-built packages (**docker image**, **deb package**) or +directly build on **Linux** and **Mac OS X** from the source code. + ## Documentation -- [Chinese Documentation](http://paddlepaddle.org/doc_cn/)
+Both [English Docs](http://paddlepaddle.org/doc/) and [Chinese Docs](http://paddlepaddle.org/doc_cn/) are provided for our users and developers. - [Quick Start](http://paddlepaddle.org/doc/demo/quick_start/index_en)
You can follow the quick start tutorial to learn how use PaddlePaddle From d8e28741858be60718973c59e21211968f460cf9 Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 8 Oct 2016 10:07:07 +0800 Subject: [PATCH 007/180] Fix CUDA_VERSION Comparsion (#165) --- cmake/flags.cmake | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 4b99e7f7fb6af..bcd734b7e4dbc 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -88,15 +88,15 @@ endfunction() # Common gpu architectures: Kepler, Maxwell foreach(capability 30 35 50) - list(APPEND __arch_flags " -gencode arch=compute_${capability},code=sm_${capability}") + list(APPEND __arch_flags " -gencode arch=compute_${capability},code=sm_${capability}") endforeach() -if (CUDA_VERSION VERSION_GREATER "7.0") +if (CUDA_VERSION VERSION_GREATER "7.0" OR CUDA_VERSION VERSION_EQUAL "7.0") list(APPEND __arch_flags " -gencode arch=compute_52,code=sm_52") endif() # Modern gpu architectures: Pascal -if (CUDA_VERSION VERSION_GREATER "8.0") +if (CUDA_VERSION VERSION_GREATER "8.0" OR CUDA_VERSION VERSION_EQUAL "8.0") list(APPEND __arch_flags " -gencode arch=compute_60,code=sm_60") endif() From 1c09e9d5d17bc40365d35b1a8bf7db1aad26e41b Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 8 Oct 2016 10:37:50 +0800 Subject: [PATCH 008/180] Update readme (#155) * Update readme * Apache 2.0 --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 2162488f2f7de..1cc0444c0617a 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # PaddlePaddle -| **`Linux`** | -|----------------| -|[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle)| +| **`Linux`** | **`License`** | **`Chat Room`** | +|----------------|---------------|-----------------| +|[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle)|[![License](https://img.shields.io/badge/license-Apache%202.0-green.svg)](LICENSE)|[![Join the chat at https://gitter.im/PaddlePaddle/Deep_Learning](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/PaddlePaddle/Deep_Learning?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)| Welcome to the PaddlePaddle GitHub. @@ -86,9 +86,9 @@ Both [English Docs](http://paddlepaddle.org/doc/) and [Chinese Docs](http://padd - [Source Code Documents](http://paddlepaddle.org/doc/source/)
## Ask Questions - -If you want to ask questions and discuss about methods and models, welcome -to send email to paddle-dev@baidu.com. Framework development discussions and +Please join the [**gitter chat**](https://gitter.im/PaddlePaddle/Deep_Learning) or send email to +**paddle-dev@baidu.com** to ask questions and talk about methods and models. +Framework development discussions and bug reports are collected on [Issues](https://github.com/baidu/paddle/issues). ## Copyright and License From 1c2ebe467bb3c88b858f5b583ab48c8944c7f0f2 Mon Sep 17 00:00:00 2001 From: Zrachel Date: Sat, 8 Oct 2016 11:28:57 +0800 Subject: [PATCH 009/180] add interface and test of RecurrentGradientMachine (#156) * add interface and unittest of RecurrentGradientMachine for the function of multiple Subsequence inlinks with unequal token length --- paddle/gserver/tests/rnn_data_provider.py | 30 ++++- ...ce_nest_rnn_multi_unequalength_inputs.conf | 106 ++++++++++++++++++ ...equence_rnn_multi_unequalength_inputs.conf | 75 +++++++++++++ .../tests/test_RecurrentGradientMachine.cpp | 9 ++ .../paddle/trainer_config_helpers/layers.py | 30 ++++- 5 files changed, 246 insertions(+), 4 deletions(-) create mode 100644 paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf create mode 100644 paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.conf diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/gserver/tests/rnn_data_provider.py index 347d5891b906b..5c3b062309c51 100644 --- a/paddle/gserver/tests/rnn_data_provider.py +++ b/paddle/gserver/tests/rnn_data_provider.py @@ -21,7 +21,7 @@ @provider(input_types=[integer_value_sub_sequence(10), - integer_value(2)], + integer_value(3)], should_shuffle=False) def process_subseq(settings, file_name): for d in data: @@ -29,7 +29,7 @@ def process_subseq(settings, file_name): @provider(input_types=[integer_value_sequence(10), - integer_value(2)], + integer_value(3)], should_shuffle=False) def process_seq(settings, file_name): for d in data: @@ -37,3 +37,29 @@ def process_seq(settings, file_name): for subseq in d[0]: seq += subseq yield seq, d[1] + +data2 = [ + [[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]] ,0], + [[[0, 2], [2, 5], [0, 1, 2]],[[1, 5], [4], [2, 3, 6, 1]], 1], +] + +@provider(input_types=[integer_value_sub_sequence(10), + integer_value_sub_sequence(10), + integer_value(2)], + should_shuffle=False) +def process_unequalength_subseq(settings, file_name): + for d in data2: + yield d + + +@provider(input_types=[integer_value_sequence(10), + integer_value_sequence(10), + integer_value(2)], + should_shuffle=False) +def process_unequalength_seq(settings, file_name): + for d in data2: + words1=reduce(lambda x,y: x+y, d[0]) + words2=reduce(lambda x,y: x+y, d[1]) + yield words1, words2, d[2] + + diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf new file mode 100644 index 0000000000000..d0b9450f4b9f9 --- /dev/null +++ b/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf @@ -0,0 +1,106 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +######################## data source ################################ +define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', + test_list=None, + module='rnn_data_provider', + obj='process_unequalength_subseq') + + +settings(batch_size=2, learning_rate=0.01) +######################## network configure ################################ +dict_dim = 10 +word_dim = 8 +hidden_dim = 8 +label_dim = 2 + +speaker1 = data_layer(name="word1", size=dict_dim) +speaker2 = data_layer(name="word2", size=dict_dim) + +emb1 = embedding_layer(input=speaker1, size=word_dim) +emb2 = embedding_layer(input=speaker2, size=word_dim) + +# This hierachical RNN is designed to be equivalent to the simple RNN in +# sequence_rnn_multi_unequalength_inputs.conf + +def outer_step(x1, x2): + outer_mem1 = memory(name = "outer_rnn_state1", size = hidden_dim) + outer_mem2 = memory(name = "outer_rnn_state2", size = hidden_dim) + def inner_step1(y): + inner_mem = memory(name = 'inner_rnn_state_' + y.name, + size = hidden_dim, + boot_layer = outer_mem1) + out = fc_layer(input = [y, inner_mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'inner_rnn_state_' + y.name) + return out + + def inner_step2(y): + inner_mem = memory(name = 'inner_rnn_state_' + y.name, + size = hidden_dim, + boot_layer = outer_mem2) + out = fc_layer(input = [y, inner_mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'inner_rnn_state_' + y.name) + return out + + encoder1 = recurrent_group( + step = inner_step1, + name = 'inner1', + input = x1) + + encoder2 = recurrent_group( + step = inner_step2, + name = 'inner2', + input = x2) + + sentence_last_state1 = last_seq(input = encoder1, name = 'outer_rnn_state1') + sentence_last_state2_ = last_seq(input = encoder2, name = 'outer_rnn_state2') + + encoder1_expand = expand_layer(input = sentence_last_state1, + expand_as = encoder2) + + return [encoder1_expand, encoder2] + + +encoder1_rep, encoder2_rep = recurrent_group( + name="outer", + step=outer_step, + input=[SubsequenceInput(emb1), SubsequenceInput(emb2)], + targetInlink=emb2) + +encoder1_last = last_seq(input = encoder1_rep) +encoder1_expandlast = expand_layer(input = encoder1_last, + expand_as = encoder2_rep) +context = mixed_layer(input = [identity_projection(encoder1_expandlast), + identity_projection(encoder2_rep)], + size = hidden_dim) + +rep = last_seq(input=context) +prob = fc_layer(size=label_dim, + input=rep, + act=SoftmaxActivation(), + bias_attr=True) + +outputs(classification_cost(input=prob, + label=data_layer(name="label", size=label_dim))) + diff --git a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.conf b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.conf new file mode 100644 index 0000000000000..28b1cb98cf132 --- /dev/null +++ b/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.conf @@ -0,0 +1,75 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +######################## data source ################################ +define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', + test_list=None, + module='rnn_data_provider', + obj='process_unequalength_seq') + + +settings(batch_size=2, learning_rate=0.01) +######################## network configure ################################ +dict_dim = 10 +word_dim = 8 +hidden_dim = 8 +label_dim = 2 + +speaker1 = data_layer(name="word1", size=dict_dim) +speaker2 = data_layer(name="word2", size=dict_dim) + +emb1 = embedding_layer(input=speaker1, size=word_dim) +emb2 = embedding_layer(input=speaker2, size=word_dim) + +# This hierachical RNN is designed to be equivalent to the RNN in +# sequence_nest_rnn_multi_unequalength_inputs.conf + +def step(x1, x2): + def calrnn(y): + mem = memory(name = 'rnn_state_' + y.name, size = hidden_dim) + out = fc_layer(input = [y, mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'rnn_state_' + y.name) + return out + + encoder1 = calrnn(x1) + encoder2 = calrnn(x2) + return [encoder1, encoder2] + +encoder1_rep, encoder2_rep = recurrent_group( + name="stepout", + step=step, + input=[emb1, emb2]) + +encoder1_last = last_seq(input = encoder1_rep) +encoder1_expandlast = expand_layer(input = encoder1_last, + expand_as = encoder2_rep) +context = mixed_layer(input = [identity_projection(encoder1_expandlast), + identity_projection(encoder2_rep)], + size = hidden_dim) + +rep = last_seq(input=context) +prob = fc_layer(size=label_dim, + input=rep, + act=SoftmaxActivation(), + bias_attr=True) + +outputs(classification_cost(input=prob, + label=data_layer(name="label", size=label_dim))) + diff --git a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp index 550df0a31844e..ae7f617371ca5 100644 --- a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp @@ -73,6 +73,7 @@ void CalCost(const string& conf, const string& dir, real* cost, *ThreadLocalRand::getSeed() = FLAGS_seed; vecW.randnorm(0, 0.1); + vecMomentum.randnorm(0, 0.1); trainer.startTrain(); for (int i = 0; i < num_passes; ++i) { @@ -140,6 +141,14 @@ TEST(RecurrentGradientMachine, rnn_multi_input) { } } +TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) { + for (bool useGpu : {false, true}) { + test("gserver/tests/sequence_rnn_multi_unequalength_inputs.conf", + "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.conf", + 1e-6, useGpu); + } +} + int main(int argc, char** argv) { if (paddle::version::isWithPyDataProvider()) { if (!paddle::version::isWithGpu()) { diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index c355dc042ac18..47db197f422ea 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -2347,7 
+2347,7 @@ def __init__(self, input): @wrap_name_default("recurrent_group") -def recurrent_group(step, input, reverse=False, name=None): +def recurrent_group(step, input, reverse=False, name=None, targetInlink=None): """ Recurrent layer group is an extremely flexible recurrent unit in PaddlePaddle. As long as the user defines the calculation done within a @@ -2401,6 +2401,17 @@ def step(input): :param reverse: If reverse is set true, the recurrent unit will process the input sequence in a reverse order. :type reverse: bool + + :param targetInlink: the input layer which share info with layer group's output + + Param input specifies multiple input layers. For + SubsequenceInput inputs, config should assign one input + layer that share info(the number of sentences and the number + of words in each sentence) with all layer group's outputs. + targetInlink should be one of the layer group's input. + + :type targetInlink: LayerOutput|SubsequenceInput + :return: LayerOutput object. :rtype: LayerOutput """ @@ -2419,6 +2430,20 @@ def is_in_links(x): in_links = filter(is_in_links, input) + def targetInlink_in_inlinks(): + for inlink in in_links: + if isinstance(inlink, SubsequenceInput): + if targetInlink == inlink.input: + return True + elif targetInlink == inlink: + return True + return False + + assert(targetInlink == None or targetInlink_in_inlinks()) + targetInlinkName = None if targetInlink == None \ + else targetInlink.name if isinstance(targetInlink, LayerOutput) \ + else targetInlink.input.name + contains_sub_seq = [False] def map_in_links(x): @@ -2430,7 +2455,8 @@ def map_in_links(x): RecurrentLayerGroupWithoutOutLinksBegin( name=name, in_links=map(map_in_links, in_links), - seq_reversed=reverse) + seq_reversed=reverse, + target_inlinkname=targetInlinkName) in_args = [] for each_input in input: assert is_single_input(each_input) From db38043439cc9beb1dec391302317510028e4ab7 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Sat, 8 Oct 2016 16:58:44 +0800 Subject: [PATCH 010/180] bug fix for dataprovider for quick start inference (#168) --- demo/quick_start/dataprovider_bow.py | 2 +- demo/quick_start/dataprovider_emb.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/quick_start/dataprovider_bow.py b/demo/quick_start/dataprovider_bow.py index 435e6d8175bd6..f8cde189cf87d 100644 --- a/demo/quick_start/dataprovider_bow.py +++ b/demo/quick_start/dataprovider_bow.py @@ -79,6 +79,6 @@ def predict_initializer(settings, dictionary, **kwargs): def process_predict(settings, file_name): with open(file_name, 'r') as f: for line in f: - comment = line.strip() + comment = line.strip().split() word_vector = [settings.word_dict.get(w, UNK_IDX) for w in comment] yield word_vector diff --git a/demo/quick_start/dataprovider_emb.py b/demo/quick_start/dataprovider_emb.py index e5030c5e71aa5..ca940a89e5477 100755 --- a/demo/quick_start/dataprovider_emb.py +++ b/demo/quick_start/dataprovider_emb.py @@ -47,6 +47,6 @@ def predict_initializer(settings, dictionary, **kwargs): def process_predict(settings, file_name): with open(file_name, 'r') as f: for line in f: - comment = line.strip() + comment = line.strip().split() word_slot = [settings.word_dict.get(w, UNK_IDX) for w in comment] yield word_slot From 0ab332242fbaad8f541566e5ecc602e2180c6591 Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 8 Oct 2016 18:22:47 +0800 Subject: [PATCH 011/180] Support MAC OS Sierra (#169) --- cmake/flags.cmake | 5 +++++ paddle/cuda/src/hl_cuda_device.cc | 6 +++++- paddle/pserver/SocketChannel.cpp | 3 ++- 
paddle/setup.py.in | 2 ++ paddle/utils/ThreadLocal.h | 10 ++-------- paddle/utils/Util.cpp | 6 +++++- paddle/utils/arch/osx/Locks.cpp | 20 ++++++++++++++++++-- 7 files changed, 39 insertions(+), 13 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index bcd734b7e4dbc..cc59309ee7efa 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -71,6 +71,11 @@ foreach(flag ${COMMON_FLAGS}) safe_set_cxxflag(CMAKE_CXX_FLAGS ${flag}) endforeach() +# On Mac OS X build fat binaries with x86_64 architectures by default. +if (APPLE) + set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE) +endif () + # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc. # So, don't set these flags here. diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc index acd8e2fe6afb4..f4c07367b485b 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/cuda/src/hl_cuda_device.cc @@ -211,7 +211,11 @@ bool hl_start_flag = false; inline pid_t gettid() { #if defined(__APPLE__) || defined(__OSX__) - pid_t tid = syscall(SYS_thread_selfid); + // syscall is deprecated: first deprecated in macOS 10.12. + // syscall is unsupported; + // syscall pid_t tid = syscall(SYS_thread_selfid); + uint64_t tid; + pthread_threadid_np(NULL, &tid); #else #ifndef __NR_gettid #define __NR_gettid 224 diff --git a/paddle/pserver/SocketChannel.cpp b/paddle/pserver/SocketChannel.cpp index b9d542a296ddd..20295d7cdc22b 100644 --- a/paddle/pserver/SocketChannel.cpp +++ b/paddle/pserver/SocketChannel.cpp @@ -157,7 +157,8 @@ void SocketChannel::writeMessage(const std::vector& userIovs) { std::vector iovs; iovs.reserve(userIovs.size() + 2); iovs.push_back({&header, sizeof(header)}); - iovs.push_back({&iovLengths[0], sizeof(iovLengths[0]) * header.numIovs}); + iovs.push_back({&iovLengths[0], static_cast( + sizeof(iovLengths[0]) * header.numIovs)}); iovs.insert(iovs.end(), userIovs.begin(), userIovs.end()); header.totalLength = 0; diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 02ea9067431c6..3341dd6f95969 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -18,6 +18,7 @@ from setuptools import setup, Extension import numpy as np import api.paddle_ld_flags import platform +import os system = platform.system().lower() @@ -45,6 +46,7 @@ except: if is_lin == True: extra_links = ["-Xlinker", '-start-group'] + extra_links + ["-Xlinker", "-end-group"] elif is_osx == True: + os.environ["ARCHFLAGS"] = "-arch x86_64" extra_links = ["-Wl,-all_load"] + extra_links include_dirs = [np.get_include(), "../"] # include numpy and paddle. diff --git a/paddle/utils/ThreadLocal.h b/paddle/utils/ThreadLocal.h index 686a1a99a4aa0..b91e4ad5472ca 100644 --- a/paddle/utils/ThreadLocal.h +++ b/paddle/utils/ThreadLocal.h @@ -22,6 +22,7 @@ limitations under the License. 
*/ #include #include #include +#include "Util.h" #include "Logging.h" namespace paddle { @@ -156,14 +157,7 @@ class ThreadLocalD { static void dataDestructor(void* p) { delete (T*)p; } void updateMap(T* p) { -#if defined(__APPLE__) || defined(__OSX__) - pid_t tid = syscall(SYS_thread_selfid); -#else - #ifndef __NR_gettid - #define __NR_gettid 224 - #endif - pid_t tid = syscall(__NR_gettid); -#endif + pid_t tid = getTID(); CHECK_NE(tid, -1); std::lock_guard guard(mutex_); auto ret = threadMap_.insert(std::make_pair(tid, p)); diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp index c3c76f907d40e..45251213d2d79 100644 --- a/paddle/utils/Util.cpp +++ b/paddle/utils/Util.cpp @@ -95,7 +95,11 @@ namespace paddle { pid_t getTID() { #if defined(__APPLE__) || defined(__OSX__) - pid_t tid = syscall(SYS_thread_selfid); + // syscall is deprecated: first deprecated in macOS 10.12. + // syscall is unsupported; + // syscall pid_t tid = syscall(SYS_thread_selfid); + uint64_t tid; + pthread_threadid_np(NULL, &tid); #else #ifndef __NR_gettid #define __NR_gettid 224 diff --git a/paddle/utils/arch/osx/Locks.cpp b/paddle/utils/arch/osx/Locks.cpp index 47e44e9d7c114..44bab7198d5f4 100644 --- a/paddle/utils/arch/osx/Locks.cpp +++ b/paddle/utils/arch/osx/Locks.cpp @@ -16,6 +16,11 @@ limitations under the License. */ #include "paddle/utils/Logging.h" #include #include + +#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 +#include +#endif + namespace paddle { class SemaphorePrivate { @@ -50,21 +55,32 @@ void Semaphore::post() { class SpinLockPrivate { public: +#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 + os_unfair_lock lock_; +#else SpinLockPrivate(): lock_(OS_SPINLOCK_INIT) {} - OSSpinLock lock_; - char padding_[64 - sizeof(OSSpinLock)]; // Padding to cache line size +#endif + char padding_[64 - sizeof(lock_)]; // Padding to cache line size }; SpinLock::SpinLock(): m(new SpinLockPrivate()) {} SpinLock::~SpinLock() { delete m; } void SpinLock::lock() { +#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 + os_unfair_lock_lock(&m->lock_); +#else OSSpinLockLock(&m->lock_); +#endif } void SpinLock::unlock() { +#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 + os_unfair_lock_unlock(&m->lock_); +#else OSSpinLockUnlock(&m->lock_); +#endif } From 8a044d2e2d607f183c173d8fb4d16c3db06b1418 Mon Sep 17 00:00:00 2001 From: stoneyang Date: Sun, 9 Oct 2016 10:27:12 +0800 Subject: [PATCH 012/180] typo in image classification demo (#167) --- demo/image_classification/vgg_16_cifar.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 demo/image_classification/vgg_16_cifar.py diff --git a/demo/image_classification/vgg_16_cifar.py b/demo/image_classification/vgg_16_cifar.py old mode 100644 new mode 100755 index 238608c3cbede..e8b8af4bd313d --- a/demo/image_classification/vgg_16_cifar.py +++ b/demo/image_classification/vgg_16_cifar.py @@ -44,7 +44,7 @@ label_size=10 img = data_layer(name='image', size=data_size) -# small_vgg is predined in trainer_config_helpers.network +# small_vgg is predefined in trainer_config_helpers.networks predict = small_vgg(input_image=img, num_channels=3, num_classes=label_size) From 191fafe355717c506663b5ddd13d8a18ae944924 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Sun, 9 Oct 2016 13:53:30 +0800 Subject: [PATCH 013/180] support rectangle padding, stride, window and input for PoolProjection (#115) * support rectangle padding, stride, window and input for PoolProjection * Follow comments. 1. Remove start 2. 
refine img_pool_a/b.conf for test_NetworkCompare 3. Split unit test * Modify the test in img_layers.py --- paddle/cuda/include/hl_cnn.h | 88 ++++--- paddle/cuda/include/stub/hl_cnn_stub.h | 48 ++-- paddle/cuda/src/hl_cuda_cnn.cu | 237 +++++++++++------- paddle/gserver/layers/CudnnPoolLayer.cpp | 1 - paddle/gserver/layers/CudnnPoolLayer.h | 10 - paddle/gserver/layers/PoolLayer.cpp | 17 -- paddle/gserver/layers/PoolLayer.h | 12 +- paddle/gserver/layers/PoolProjectionLayer.cpp | 28 ++- paddle/gserver/tests/img_pool_a.conf | 46 ++++ paddle/gserver/tests/img_pool_b.conf | 44 ++++ paddle/gserver/tests/test_LayerGrad.cpp | 36 +-- paddle/gserver/tests/test_NetworkCompare.cpp | 14 ++ paddle/math/Matrix.cpp | 155 +++++++----- paddle/math/Matrix.h | 79 ++++-- paddle/math/tests/test_matrixCompare.cpp | 153 +++++++++++ proto/ModelConfig.proto.m4 | 3 +- python/paddle/trainer/config_parser.py | 25 +- .../paddle/trainer_config_helpers/layers.py | 43 +++- .../paddle/trainer_config_helpers/networks.py | 12 +- .../paddle/trainer_config_helpers/poolings.py | 23 +- .../tests/configs/check.md5 | 2 +- .../tests/configs/img_layers.py | 6 +- 22 files changed, 757 insertions(+), 325 deletions(-) create mode 100644 paddle/gserver/tests/img_pool_a.conf create mode 100644 paddle/gserver/tests/img_pool_b.conf diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index dcae62d06b26d..5d750333e1e35 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -84,16 +84,23 @@ extern void hl_expand_feature2col( * @param[in] width image width. * @param[in] pooledH output image height. * @param[in] pooledW output image width. - * @param[in] sizeX size of pooling window. - * @param[in] stride pooling stride. - * @param[in] start pooling start. + * @param[in] sizeX width of pooling window. + * @param[in] sizeY height of pooling window. + * @param[in] strideH pooling stride height. + * @param[in] strideW pooling stride width. + * @param[in] paddingH padding height. + * @param[in] paddingW padding width. * @param[out] tgtData output data. * */ extern void hl_maxpool_forward( - int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData); + const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, real* tgtData); /** * @brief Maximum pool backward. @@ -107,21 +114,28 @@ extern void hl_maxpool_forward( * @param[in] width image width. * @param[in] pooledH output image height. * @param[in] pooledW output image width. - * @param[in] sizeX size of pooling window. - * @param[in] stride pooling stride. - * @param[in] start pooling start. - * @param[out] targetGrad output grad. + * @param[in] sizeX width of pooling window. + * @param[in] sizeY height of pooling window. + * @param[in] strideH pooling stride height. + * @param[in] strideW pooling stride width. * @param[in] scaleA scale. * @param[in] scaleB scale. + * @param[in] paddingH padding height. + * @param[in] paddingW padding width. + * @param[out] targetGrad output grad. 
* */ extern void hl_maxpool_backward( - int frameCnt, const real* inputData, + const int frameCnt, const real* inputData, const real* outData, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* targetGrad, - real scaleA, real scaleB); + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, + real scaleA, real scaleB, + real* targetGrad); /** * @brief Averge pool forward. @@ -133,16 +147,23 @@ extern void hl_maxpool_backward( * @param[in] width image width. * @param[in] pooledH output image height. * @param[in] pooledW output image width. - * @param[in] sizeX size of pooling window. - * @param[in] stride pooling stride. - * @param[in] start pooling start. + * @param[in] sizeX width of pooling window. + * @param[in] sizeY height of pooling window. + * @param[in] strideH pooling stride height. + * @param[in] strideW pooling stride width. + * @param[in] paddingH padding height. + * @param[in] paddingW padding width. * @param[out] tgtData output data. * */ extern void hl_avgpool_forward( - int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData); + const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, real* tgtData); /** * @brief Maximum pool backward. @@ -154,20 +175,27 @@ extern void hl_avgpool_forward( * @param[in] width image width. * @param[in] pooledH output image height. * @param[in] pooledW output image width. - * @param[in] sizeX size of pooling window. - * @param[in] stride pooling stride. - * @param[in] start pooling start. - * @param[out] backGrad output grad. + * @param[in] sizeX width of pooling window. + * @param[in] sizeY height of pooling window. + * @param[in] strideH pooling stride height. + * @param[in] strideW pooling stride width. + * @param[in] paddingH padding height. + * @param[in] paddingW padding width. * @param[in] scaleA scale. * @param[in] scaleB scale. + * @param[out] backGrad output grad. * */ extern void hl_avgpool_backward( - int frameCnt, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* backGrad, - real scaleA, real scaleB); + const int frameCnt, const real* outGrad, + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + int paddingH, int paddingW, + real scaleA, real scaleB, + real* backGrad); /** * @brief Cross-map-respose normalize forward. 
diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index e4d46e4fb186e..38e359c3eb2f3 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -38,29 +38,45 @@ inline void hl_expand_feature2col( real* dataCol) {} inline void hl_maxpool_forward( - int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData) {} + const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, real* tgtData) {} inline void hl_maxpool_backward( - int frameCnt, const real* inputData, + const int frameCnt, const real* inputData, const real* outData, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* targetGrad, - real scaleA, real scaleB) {} + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, + real scaleA, real scaleB, + real* targetGrad) {} inline void hl_avgpool_forward( - int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData) {} + const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, real* tgtData) {} inline void hl_avgpool_backward( - int frameCnt, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* backGrad, - real scaleA, real scaleB) {} + const int frameCnt, const real* outGrad, + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + int paddingH, int paddingW, + real scaleA, real scaleB, + real* backGrad) {} inline void hl_CMRNorm_forward( size_t frameCnt, const real* in, real* scale, real* out, diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index b3695a2c7f88e..abac83a3e0447 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -145,24 +145,28 @@ void hl_shrink_col2feature(const real * dataCol, size_t channels, CHECK_SYNC("hl_shrink_col2feature failed"); } -__global__ void KeMaxPoolForward(int nthreads, const real* inputData, - int channels, int height, int width, - int pooledH, int pooledW, - int ksize, int stride, int start, +__global__ void KeMaxPoolForward(const int nthreads, const real* inputData, + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int ksizeW, const int ksizeH, + const int strideH, const int strideW, + const int offsetH, const int offsetW, real* tgtData) { - int index = blockIdx.y * blockDim.x + threadIdx.x; + int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % pooledW; int ph = (index / pooledW) % pooledH; int c = (index / pooledW / pooledH) % channels; - int frameNum = blockIdx.x; - int hstart = ph * stride + start; - int hend = 
min(hstart + ksize, height); - int wstart = pw * stride + start; - int wend = min(wstart + ksize, width); + int frameNum = index / pooledW / pooledH / channels; + int hstart = ph * strideH - offsetH; + int wstart = pw * strideW - offsetW; + int hend = min(hstart + ksizeH, height); + int wend = min(wstart + ksizeW, width); + hstart = max(hstart, 0); + wstart = max(wstart, 0); real maxval = -FLT_MAX; inputData += (frameNum * channels + c) * height * width; - tgtData += (frameNum * channels) * pooledW * pooledH; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (maxval < inputData[h * width + w]) @@ -173,44 +177,54 @@ __global__ void KeMaxPoolForward(int nthreads, const real* inputData, } } -void hl_maxpool_forward(int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData) { - int num_kernels = pooledH * pooledW * channels; - int blocksX = frameCnt; - int blocksY = (num_kernels + 1024 -1) / 1024; +void hl_maxpool_forward(const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, + real* tgtData) { + + int num_kernels = pooledH * pooledW * channels * frameCnt; + int blocks = (num_kernels + 1024 - 1) / 1024; dim3 threads(1024, 1); - dim3 grid(blocksX, blocksY); + dim3 grid(blocks, 1); + KeMaxPoolForward<<< grid, threads, 0, STREAM_DEFAULT >>> (num_kernels, inputData, channels, height, width, - pooledH, pooledW, sizeX, stride, start, tgtData); + pooledH, pooledW, sizeX, sizeY, strideH, strideW, + paddingH, paddingW, tgtData); CHECK_SYNC("hl_maxpool_forward failed"); } -__global__ void KeMaxPoolBackward(int nthreads, const real* inputData, +__global__ void KeMaxPoolBackward(const int nthreads, const real* inputData, const real* outData, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* targetGrad, - real scaleA, real scaleB) { - int index = blockIdx.y * blockDim.x + threadIdx.x; + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int padH, const int padW, + real scaleA, real scaleB, + real* targetGrad) { + int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { // find out the local index // find out the local offset - int offsetW = index % width + start; - int offsetH = (index / width) % height + start; + int offsetW = index % width + padW; + int offsetH = (index / width) % height + padH; int offsetC = (index / width / height) % channels; - int frameNum = blockIdx.x; - int phstart = (offsetH < sizeX) ? 0 : (offsetH - sizeX) / stride + 1; - int phend = min(offsetH / stride + 1, pooledH); - int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / stride + 1; - int pwend = min(offsetW / stride + 1, pooledW); + + int frameNum = index / width / height / channels; + int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1; + int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1; + int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0; + int pwend = offsetW >= 0 ? 
min(offsetW / strideW + 1, pooledW) : 0; real gradient = 0; - inputData += (frameNum * channels) * height * width; real input = inputData[index]; outData += (frameNum * channels + offsetC) * pooledH * pooledW; outGrad += (frameNum * channels + offsetC) * pooledH * pooledW; - targetGrad += (frameNum * channels) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (input == outData[ph * pooledW + pw]) { @@ -223,90 +237,114 @@ __global__ void KeMaxPoolBackward(int nthreads, const real* inputData, } } -void hl_maxpool_backward(int frameCnt, const real* inputData, +void hl_maxpool_backward(const int frameCnt, const real* inputData, const real* outData, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* targetGrad, - real scaleA, real scaleB) { - int num_kernels = (height - start) * (width - start) * channels; - int blocksX = frameCnt; - int blocksY = (num_kernels + 1024 -1) / 1024; - dim3 threads(1024, 1); - dim3 grid(blocksX, blocksY); + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, + real scaleA, real scaleB, + real* targetGrad) { - KeMaxPoolBackward<<< grid, threads, 0, STREAM_DEFAULT >>> + int num_kernels = height * width * channels * frameCnt; + int blocks = (num_kernels + 1024 - 1) / 1024; + + KeMaxPoolBackward<<< blocks, 1024, 0, STREAM_DEFAULT >>> (num_kernels, inputData, outData, outGrad, channels, - height, width, pooledH, pooledW, sizeX, stride, start, - targetGrad, scaleA, scaleB); + height, width, pooledH, pooledW, sizeX, sizeY, + strideH, strideW, + paddingH, paddingW, + scaleA, scaleB, + targetGrad); CHECK_SYNC("hl_maxpool_backward"); } -__global__ void KeAvePoolForward(int nthreads, const real* inputData, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* tgtData) { - int index = blockIdx.y * blockDim.x + threadIdx.x; +__global__ void KeAvgPoolForward(const int nthreads, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int padH, const int padW, + real* tgtData) { + int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % pooledW; int ph = (index / pooledW) % pooledH; int c = (index / pooledW / pooledH) % channels; - int frameNum = blockIdx.x; - int hstart = ph * stride + start; - int hend = min(hstart + sizeX, height); - int wstart = pw * stride + start; - int wend = min(wstart + sizeX, width); + int frameNum = index / pooledW / pooledH / channels; + + int hstart = ph * strideH - padH; + int wstart = pw * strideW - padW; + int hend = min(hstart + sizeY, height + padH); + int wend = min(wstart + sizeX, width + padW); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, height); + wend = min(wend, width); + real aveval = 0; inputData += (frameNum * channels + c) * height * width; - tgtData += (frameNum * channels) * pooledH * pooledW; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += inputData[h * width + w]; } } - tgtData[index] = aveval / ((hend - hstart) * (wend - wstart)); + tgtData[index] = aveval / pool_size; } } -void 
hl_avgpool_forward(int frameCnt, const real* inputData, int channels, - int height, int width, int pooledH, int pooledW, - int sizeX, int stride, int start, real* tgtData) { - int num_kernels = pooledH * pooledW * channels; - int blocksX = frameCnt; - int blocksY = (num_kernels + 1024 -1) / 1024; - dim3 threads(1024, 1); - dim3 grid(blocksX, blocksY); - KeAvePoolForward<<< grid, threads, 0, STREAM_DEFAULT >>> +void hl_avgpool_forward(const int frameCnt, const real* inputData, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, real* tgtData) { + int num_kernels = pooledH * pooledW * channels * frameCnt; + int blocks = (num_kernels + 1024 - 1) / 1024; + KeAvgPoolForward<<< blocks, 1024, 0, STREAM_DEFAULT >>> (num_kernels, inputData, channels, height, width, pooledH, pooledW, - sizeX, stride, start, tgtData); + sizeX, sizeY, strideH, strideW, + paddingH, paddingW, tgtData); CHECK_SYNC("hl_avgpool_forward failed"); } -__global__ void KeAvgPoolBackward(int nthreads, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* tgtGrad, - real scaleA, real scaleB) { - int index = blockIdx.y * blockDim.x + threadIdx.x; +__global__ void KeAvgPoolBackward(const int nthreads, const real* outGrad, + const int channels, const int height, + const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int padH, const int padW, + real scaleA, real scaleB, + real* tgtGrad) { + int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { - int offsetW = index % width + start; - int offsetH = (index / width) % height + start; + int offsetW = index % width + padW; + int offsetH = (index / width) % height + padH; int offsetC = (index / width / height) % channels; - int frameNum = blockIdx.x; - int phstart = (offsetH < sizeX) ? 0 : (offsetH - sizeX) / stride + 1; - int phend = min(offsetH / stride + 1, pooledH); - int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / stride + 1; - int pwend = min(offsetW / stride + 1, pooledW); + int frameNum = index / width / height / channels; + + int phstart = (offsetH < sizeY) ? 0 : (offsetH - sizeY) / strideH + 1; + int pwstart = (offsetW < sizeX) ? 0 : (offsetW - sizeX) / strideW + 1; + int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0; + int pwend = offsetW >= 0 ? 
min(offsetW / strideW + 1, pooledW) : 0; real gradient = 0; outGrad += (frameNum * channels + offsetC) * pooledH * pooledW; - tgtGrad += (frameNum * channels) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size - int poolsize = (min(ph * stride + sizeX, height) - ph * stride) * - (min(pw * stride + sizeX, width) - pw * stride); + int hstart = ph * strideH - padH; + int wstart = pw * strideW - padW; + int hend = min(hstart + sizeY, height + padH); + int wend = min(wstart + sizeX, width + padW); + int poolsize = (hend - hstart) * (wend - wstart); gradient += outGrad[ph * pooledW + pw]/poolsize; } } @@ -314,20 +352,25 @@ __global__ void KeAvgPoolBackward(int nthreads, const real* outGrad, } } -void hl_avgpool_backward(int frameCnt, const real* outGrad, - int channels, int height, int width, - int pooledH, int pooledW, int sizeX, - int stride, int start, real* backGrad, - real scaleA, real scaleB) { - int num_kernels = (height - start) * (width - start) * channels; - int blocksX = frameCnt; - int blocksY = (num_kernels + 1024 -1) / 1024; - dim3 threads(1024, 1); - dim3 grid(blocksX, blocksY); +void hl_avgpool_backward(const int frameCnt, const real* outGrad, + const int channels, + const int height, const int width, + const int pooledH, const int pooledW, + const int sizeX, const int sizeY, + const int strideH, const int strideW, + const int paddingH, const int paddingW, + real scaleA, real scaleB, + real* backGrad) { + int num_kernels = height * width * channels * frameCnt; + int blocks = (num_kernels + 1024 - 1) / 1024; - KeAvgPoolBackward <<< grid, threads, 0, STREAM_DEFAULT >>> + KeAvgPoolBackward <<< blocks, 1024, 0, STREAM_DEFAULT >>> (num_kernels, outGrad, channels, height, width, - pooledH, pooledW, sizeX, stride, start, backGrad, scaleA, scaleB); + pooledH, pooledW, sizeX, sizeY, + strideH, strideW, + paddingH, paddingW, + scaleA, scaleB, + backGrad); CHECK_SYNC("hl_avgpool_backward failed"); } diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/gserver/layers/CudnnPoolLayer.cpp index 86c056ef5692a..4c733591b3779 100644 --- a/paddle/gserver/layers/CudnnPoolLayer.cpp +++ b/paddle/gserver/layers/CudnnPoolLayer.cpp @@ -51,7 +51,6 @@ bool CudnnPoolLayer::init(const LayerMap &layerMap, PoolLayer::init(layerMap, parameterMap); CHECK(useGpu_) << "CudnnPoolLayer only support gpu"; - CHECK_EQ(start_, 0) << poolType_ << " dose not support 'start'"; hl_create_tensor_descriptor(&inputDesc_); hl_create_tensor_descriptor(&outputDesc_); diff --git a/paddle/gserver/layers/CudnnPoolLayer.h b/paddle/gserver/layers/CudnnPoolLayer.h index df97ef2edfd01..2ef94720d2b9f 100644 --- a/paddle/gserver/layers/CudnnPoolLayer.h +++ b/paddle/gserver/layers/CudnnPoolLayer.h @@ -56,16 +56,6 @@ class CudnnPoolLayer : public PoolLayer { void reshape(int batchSize); virtual void forward(PassType passType); virtual void backward(const UpdateCallback& callback = nullptr); - - /** - * Calculate output size according window size of pooling. 
- */ - int outputSize(int imageSize, int windowSize, int padding, int stride) { - int outputSize; - outputSize = - (imageSize - windowSize + 2 * padding + stride - 1) / stride + 1; - return outputSize; - } }; } // namespace paddle diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 0ff7f374abb4b..7fc27ac0bd8e0 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -35,7 +35,6 @@ bool PoolLayer::init(const LayerMap& layerMap, poolType_ = conf.pool_type(); channels_ = conf.channels(); sizeX_ = conf.size_x(); - start_ = conf.start(); stride_ = conf.stride(); outputX_ = conf.output_x(); imgSize_ = conf.img_size(); @@ -47,22 +46,6 @@ bool PoolLayer::init(const LayerMap& layerMap, confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); - bool cudnnTypeCheck = true; -#ifndef PADDLE_ONLY_CPU - cudnnTypeCheck = !CudnnPoolLayer::typeCheck(poolType_); -#endif - - if ((sizeY_ != sizeX_ || imgSizeY_ != imgSize_ || strideY_ != stride_ || - confPaddingY_ != confPadding_ || outputY_ != outputX_) && - cudnnTypeCheck) { - LOG(FATAL) << poolType_ << " does not supported non-square " - "filter, image, stride or padding"; - } - - if (confPadding_ != 0 && cudnnTypeCheck) { - LOG(FATAL) << poolType_ << " does not supported 'padding'"; - } - return true; } diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/gserver/layers/PoolLayer.h index b7a1dfd7632f9..bde1f5b8dcbfd 100644 --- a/paddle/gserver/layers/PoolLayer.h +++ b/paddle/gserver/layers/PoolLayer.h @@ -28,7 +28,7 @@ namespace paddle { class PoolLayer : public Layer { protected: size_t channels_, sizeX_, stride_, outputX_, imgSize_; - int start_, confPadding_; + int confPadding_; size_t sizeY_; size_t imgSizeY_; @@ -47,6 +47,16 @@ class PoolLayer : public Layer { static Layer* create(const LayerConfig& config); virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + /** + * Calculate output size according window size and padding size. 
+ */ + int outputSize(int imageSize, int windowSize, int padding, int stride) { + int outputSize; + outputSize = + (imageSize - windowSize + 2 * padding + stride - 1) / stride + 1; + return outputSize; + } }; } // namespace paddle diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp index 9c2d6d2164a3f..5a2e9afb6e164 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/gserver/layers/PoolProjectionLayer.cpp @@ -25,13 +25,15 @@ size_t PoolProjectionLayer::getSize() { imgSizeH_ = inputLayers_[0]->getOutput().getFrameHeight(); imgSizeW_ = inputLayers_[0]->getOutput().getFrameWidth(); if (imgSizeH_ == 0) { - imgSizeH_ = imgSize_; + imgSizeH_ = imgSizeY_; } if (imgSizeW_ == 0) { imgSizeW_ = imgSize_; } - outputH_ = 1 + (imgSizeH_ - start_ - sizeX_ + stride_ - 1) / stride_; - outputW_ = 1 + (imgSizeW_ - start_ - sizeX_ + stride_ - 1) / stride_; + + outputH_ = outputSize(imgSizeH_, sizeY_, confPaddingY_, strideY_); + outputW_ = outputSize(imgSizeW_, sizeX_, confPadding_, stride_); + layerSize = outputH_ * outputW_ * channels_; getOutput().setFrameHeight(outputH_); @@ -51,8 +53,9 @@ void MaxPoolProjectionLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); - outV->maxPoolForward(*input, imgSizeH_, imgSizeW_, channels_, sizeX_, start_, - stride_, outputH_, outputW_); + outV->maxPoolForward(*input, imgSizeH_, imgSizeW_, channels_, + sizeX_, sizeY_, strideY_, stride_, + outputH_, outputW_, confPaddingY_, confPadding_); } void MaxPoolProjectionLayer::backward(const UpdateCallback& callback) { @@ -69,7 +72,9 @@ void MaxPoolProjectionLayer::backward(const UpdateCallback& callback) { MatrixPtr inputGrad = getInputGrad(0); inputGrad->maxPoolBackward(*inputV, imgSizeH_, imgSizeW_, *outGrad, *outV, - sizeX_, start_, stride_, outputH_, outputW_, 1, 1); + sizeX_, sizeY_, + strideY_, stride_, outputH_, outputW_, 1, 1, + confPaddingY_, confPadding_); } void AvgPoolProjectionLayer::forward(PassType passType) { @@ -84,8 +89,9 @@ void AvgPoolProjectionLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); - outV->avgPoolForward(*input, imgSizeH_, imgSizeW_, channels_, sizeX_, start_, - stride_, outputH_, outputW_); + outV->avgPoolForward(*input, imgSizeH_, imgSizeW_, channels_, + sizeX_, sizeY_, strideY_, stride_, + outputH_, outputW_, confPaddingY_, confPadding_); } void AvgPoolProjectionLayer::backward(const UpdateCallback& callback) { @@ -97,7 +103,9 @@ void AvgPoolProjectionLayer::backward(const UpdateCallback& callback) { /* Do derivation */ MatrixPtr outputGrad = getOutputGrad(); MatrixPtr inputGrad = getInputGrad(0); - inputGrad->avgPoolBackward(*outputGrad, imgSizeH_, imgSizeW_, sizeX_, start_, - stride_, outputH_, outputW_, 1, 1); + inputGrad->avgPoolBackward(*outputGrad, imgSizeH_, imgSizeW_, + sizeX_, sizeY_, strideY_, stride_, + outputH_, outputW_, 1, 1, + confPaddingY_, confPadding_); } } // namespace paddle diff --git a/paddle/gserver/tests/img_pool_a.conf b/paddle/gserver/tests/img_pool_a.conf new file mode 100644 index 0000000000000..5938e7611201c --- /dev/null +++ b/paddle/gserver/tests/img_pool_a.conf @@ -0,0 +1,46 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +settings(batch_size=10) +data = data_layer(name ="input", size=8*16*16) +conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=8,stride=1) +maxpool = img_pool_layer(input=conv, + pool_size=3, + pool_size_y=5, + num_channels=8, + stride=1, + stride_y=2, + padding=1, + padding_y=2, + img_width=16, + pool_type=MaxPooling(), +) +avgpool = img_pool_layer(input=conv, + pool_size=3, + pool_size_y=5, + num_channels=8, + stride=1, + stride_y=2, + padding=1, + padding_y=2, + img_width=16, + pool_type=AvgPooling(), +) + +outputs([maxpool, avgpool]) diff --git a/paddle/gserver/tests/img_pool_b.conf b/paddle/gserver/tests/img_pool_b.conf new file mode 100644 index 0000000000000..6ea9649b3f1ea --- /dev/null +++ b/paddle/gserver/tests/img_pool_b.conf @@ -0,0 +1,44 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from paddle.trainer_config_helpers import * + +settings(batch_size=10) +data = data_layer(name ="input", size=8*16*16) +conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, num_filters=8, stride=1) +maxpool = img_pool_layer(input=conv, + pool_size=3, + pool_size_y=5, + num_channels=8, + stride=1, + stride_y=2, + padding=1, + padding_y=2, + pool_type=CudnnMaxPooling(), +) + +avgpool = img_pool_layer(input=conv, + pool_size=3, + pool_size_y=5, + num_channels=8, + stride=1, + stride_y=2, + padding=1, + padding_y=2, + pool_type=CudnnAvgPooling(), +) + +outputs([maxpool, avgpool]) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 3150c31e4900c..c5723f8574ab3 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -791,21 +791,24 @@ void setPoolConfig(TestConfig* config, PoolConfig* pool, (*config).biasSize = 0; (*config).layerConfig.set_type("pool"); (*config).layerConfig.set_num_filters(16); - (*config).layerConfig.set_partial_sum(1); - (*config).layerConfig.set_shared_biases(true); + int kw = 3, kh = 3; + int pw = 0, ph = 0; + int sw = 2, sh = 2; pool->set_pool_type(poolType); pool->set_channels(16); - pool->set_size_x(3); - if (poolType == "cudnn-max-pool" || poolType == "cudnn-avg-pool") { - pool->set_padding(0); - } else { - pool->set_start(0); - } - pool->set_stride(2); - pool->set_output_x((pool->img_size() - pool->start() - pool->size_x()) / - ((float)pool->stride()) + - 1.5); + pool->set_size_x(kw); + pool->set_size_y(kh); + pool->set_start(0); + pool->set_padding(pw); + pool->set_padding_y(ph); + pool->set_stride(sw); + pool->set_stride_y(sh); + + int ow = (pool->img_size() - kw + 2 * pw + sw - 1) / sw + 1; + int oh = (pool->img_size_y() - kh + 2 * ph + sh - 1) / sh + 1; + pool->set_output_x(ow); + pool->set_output_y(oh); } void testPoolLayer(const string& poolType, bool trans, bool useGpu) { @@ -814,9 +817,10 @@ void testPoolLayer(const string& poolType, bool trans, bool useGpu) { LayerInputConfig* input = config.layerConfig.add_inputs(); PoolConfig* pool = input->mutable_pool_conf(); - setPoolConfig(&config, pool, poolType); pool->set_img_size(14); - config.layerConfig.set_size(pool->output_x() * pool->output_x() * + pool->set_img_size_y(14); + setPoolConfig(&config, pool, poolType); + config.layerConfig.set_size(pool->output_x() * pool->output_y() * pool->channels()); testLayerGrad(config, "pool", 100, trans, useGpu); @@ -829,11 +833,11 @@ void testPoolLayer2(const string& poolType, bool trans, bool useGpu) { LayerInputConfig* input = config.layerConfig.add_inputs(); PoolConfig* pool = input->mutable_pool_conf(); - setPoolConfig(&config, pool, poolType); pool->set_size_y(4); pool->set_stride_y(3); pool->set_img_size(10); pool->set_img_size_y(20); + setPoolConfig(&config, pool, poolType); pool->set_output_y((pool->img_size_y() - pool->start() - pool->size_y()) / ((float)pool->stride_y()) + 1.5); @@ -1252,8 +1256,6 @@ TEST(Layer, MultiplexLayer) { } } - - int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index 1c6a8b0017fc9..b3ef53067301b 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -116,6 +116,8 @@ void calcGradient(DataIn& in, DataOut& out, const std::string& configPath) { gradientMachine->start(trainer.getConfig(), nullptr); 
gradientMachine->forward(in.inArgs, &outArgs, PASS_TRAIN); for (size_t i = 0; i < in.outGrads.size(); i++) { + // If the all the layers in the config have no parameters, also + // not set NeedGradient(), the outArgs[i] will be nullptr. outArgs[i].grad->copyFrom(*in.outGrads[i]); } gradientMachine->backward(); @@ -225,6 +227,18 @@ TEST(Compare, concat_table) { compareNetwork(config_file_a, config_file_b); } +#ifndef PADDLE_ONLY_CPU +TEST(Compare, img_pool) { + std::string config_file_a = "./gserver/tests/img_pool_a.conf"; + std::string config_file_b = "./gserver/tests/img_pool_b.conf"; + bool useGpu = FLAGS_use_gpu; + FLAGS_use_gpu = true; + compareNetwork(config_file_a, config_file_b); + FLAGS_use_gpu = useGpu; +} +#endif + + P_DEFINE_string(config_file_a, "", "config of one network to compare"); P_DEFINE_string(config_file_b, "", "config of another network to compare"); TEST(Compare, network) { diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index e351bede724ac..a6ff2f3b35d04 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -860,9 +860,11 @@ void GpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, } void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, size_t sizeX, - int start, size_t stride, size_t outputH, - size_t outputW) { + size_t imgSizeW, size_t channels, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal"; real* inputData = inputMat.getData(); @@ -874,14 +876,17 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, CHECK(width_ == outputH * outputW * channels); hl_maxpool_forward(frameNum, inputData, channels, height, width, - outputH, outputW, sizeX, stride, start, data_); + outputH, outputW, sizeX, sizeY, strideH, strideW, + paddingH, paddingW, data_); } void GpuMatrix::maxPoolBackward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, Matrix& outGrad, Matrix& outV, - size_t sizeX, int start, size_t stride, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput) { + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { CHECK(inputMat.useGpu_ == true && outGrad.useGpu_ == true && outV.useGpu_ == true) << "Matrix type are not equal"; @@ -899,15 +904,19 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat, size_t imgSizeH, CHECK(outGrad.getHeight() == outV.getHeight() && outGrad.getWidth() == outV.getWidth()); + hl_maxpool_backward(frameNum, inputData, outData, outDiff, channels, - height, width, outputH, outputW, sizeX, stride, - start, data_, scaleTargets, scaleOutput); + height, width, outputH, outputW, sizeX, sizeY, + strideH, strideW, paddingH, paddingW, + scaleTargets, scaleOutput, data_); } void GpuMatrix::avgPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, size_t sizeX, - int start, size_t stride, size_t outputH, - size_t outputW) { + size_t imgSizeW, size_t channels, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal"; real* inputData = inputMat.getData(); @@ -919,13 +928,17 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat, size_t imgSizeH, CHECK(width_ == outputH * outputW * channels); hl_avgpool_forward(frameNum, inputData, channels, height, 
width, - outputH, outputW, sizeX, stride, start, data_); + outputH, outputW, sizeX, sizeY, + strideH, strideW, + paddingH, paddingW, data_); } void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH, - size_t imgSizeW, size_t sizeX, int start, - size_t stride, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput) { + size_t imgSizeW, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { CHECK(outGrad.useGpu_ == true) << "Matrix type are not equal"; real* outDiff = outGrad.getData(); @@ -938,8 +951,10 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH, CHECK(outGrad.getWidth() == outputH * outputW * channels); hl_avgpool_backward(frameNum, outDiff, channels, height, width, - outputH, outputW, sizeX, stride, start, data_, - scaleTargets, scaleOutput); + outputH, outputW, sizeX, sizeY, + strideH, strideW, paddingH, paddingW, + scaleTargets, scaleOutput, + data_); } void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH, @@ -1450,19 +1465,23 @@ void CpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, } void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, size_t sizeX, - int start, size_t stride, size_t outputH, - size_t outputW) { + size_t imgSizeW, size_t channels, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { real* inputData = inputMat.getData(); real* outData = data_; size_t num = inputMat.getHeight(); size_t inWidth = imgSizeW; size_t inHeight = imgSizeH; CHECK(inHeight * inWidth == inputMat.getWidth() / channels); + CHECK_EQ(num, this->getHeight()); + CHECK_EQ(channels*outputH*outputW, this->getWidth()); /* initialize the data_ */ for (size_t i = 0; i < height_ * width_; i++) { - data_[i] = -FLT_MAX; + outData[i] = -(real)FLT_MAX; } /* pool max one by one */ @@ -1470,12 +1489,14 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, for (size_t c = 0; c < channels; ++c) { // channel by channel for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { - size_t hstart = ph * stride + start; - size_t wstart = pw * stride + start; - size_t hend = std::min(hstart + sizeX, inHeight); - size_t wend = std::min(wstart + sizeX, inWidth); - for (size_t h = hstart; h < hend; ++h) { - for (size_t w = wstart; w < wend; ++w) { + int hstart = ph * strideH - paddingH; + int wstart = pw * strideW - paddingW; + int hend = std::min(hstart + sizeY, inHeight); + int wend = std::min(wstart + sizeX, inWidth); + hstart = std::max(hstart, 0); + wstart = std::max(wstart, 0); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { outData[ph * outputW + pw] = std::max(outData[ph * outputW + pw], inputData[h * inWidth + w]); } @@ -1491,9 +1512,10 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, void CpuMatrix::maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, Matrix& outGrad, Matrix& outV, size_t sizeX, - int start, size_t stride, size_t outputH, - size_t outputW, real scaleTargets, - real scaleOutput) { + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { size_t num = image.getHeight(); size_t channels = size_t(width_ / imgSizeH / imgSizeW); CHECK(image.getWidth() == imgSizeH * imgSizeW * channels); 
@@ -1509,32 +1531,36 @@ void CpuMatrix::maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { - size_t hstart = ph * stride + start; - size_t wstart = pw * stride + start; - size_t hend = std::min(hstart + sizeX, imgSizeH); - size_t wend = std::min(wstart + sizeX, imgSizeW); - for (size_t h = hstart; h < hend; ++h) { - for (size_t w = wstart; w < wend; ++w) { + int hstart = ph * strideH - paddingH; + int wstart = pw * strideW - paddingW; + int hend = std::min(hstart + sizeY, imgSizeH); + int wend = std::min(wstart + sizeX, imgSizeW); + hstart = std::max(hstart, 0); + wstart = std::max(wstart, 0); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { tgtGrad[h * imgSizeW + w] = scaleTargets * tgtGrad[h * imgSizeW + w] + scaleOutput * otGrad[ph * outputW + pw] * - (inData[h * imgSizeW + w] == otData[ph * outputH + pw]); + (inData[h * imgSizeW + w] == otData[ph * outputW + pw]); } } } } // offset inData += imgSizeH * imgSizeW; - otData += outputH * outputW; tgtGrad += imgSizeH * imgSizeW; + otData += outputH * outputW; otGrad += outputH * outputW; } } } void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start, - size_t stride, size_t outputH, size_t outputW) { + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { // The main loop size_t num = input.getHeight(); size_t inHeight = imgSizeH; @@ -1548,17 +1574,24 @@ void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { - size_t hstart = ph * stride + start; - size_t wstart = pw * stride + start; - size_t hend = std::min(hstart + sizeX, inHeight); - size_t wend = std::min(wstart + sizeX, inWidth); + int hstart = ph * strideH - paddingH; + int wstart = pw * strideW - paddingW; + int hend = std::min(hstart + sizeY, inHeight + paddingH); + int wend = std::min(wstart + sizeX, inWidth + paddingW); + int poolSize = (hend - hstart) * (wend - wstart); + hstart = std::max(hstart, 0); + wstart = std::max(wstart, 0); + hend = std::min(hend, static_cast(inHeight)); + wend = std::min(wend, static_cast(inWidth)); + + CHECK(poolSize); tgtData[ph * outputW + pw] = 0; // clear - for (size_t h = hstart; h < hend; ++h) { - for (size_t w = wstart; w < wend; ++w) { + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { tgtData[ph * outputW + pw] += inData[h * inWidth + w]; } } - tgtData[ph * outputW + pw] /= (hend - hstart) * (wend - wstart); + tgtData[ph * outputW + pw] /= poolSize; } } // compute offset @@ -1569,9 +1602,11 @@ void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, } void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t sizeX, int start, size_t stride, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput) { + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { size_t num = input.getHeight(); size_t channels = input.getWidth() / outputH / outputW; CHECK(imgSizeH * imgSizeW * channels == getWidth()); @@ -1582,14 +1617,20 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t 
imgSizeW, for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { - size_t hstart = ph * stride + start; - size_t wstart = pw * stride + start; - size_t hend = std::min(hstart + sizeX, imgSizeH); - size_t wend = std::min(wstart + sizeX, imgSizeW); - size_t poolsize = (hend - hstart) * (wend - wstart); - for (size_t h = hstart; h < hend; ++h) { - for (size_t w = wstart; w < wend; ++w) { - outData[h * imgSizeW + w] += inData[ph * outputW + pw] / poolsize; + int hstart = ph * strideH - paddingH; + int wstart = pw * strideW - paddingW; + int hend = std::min(hstart + sizeY, imgSizeH + paddingH); + int wend = std::min(wstart + sizeX, imgSizeW + paddingW); + int poolSize = (hend - hstart) * (wend - wstart); + hstart = std::max(hstart, 0); + wstart = std::max(wstart, 0); + hend = std::min(hend, static_cast(imgSizeH)); + wend = std::min(wend, static_cast(imgSizeW)); + CHECK(poolSize); + + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + outData[h * imgSizeW + w] += inData[ph * outputW + pw] / poolSize; } } } diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index cfb30797fcf1b..5c15c94012816 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -742,31 +742,37 @@ class Matrix : public BaseMatrix { */ virtual void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, size_t channels, size_t sizeX, - int start_, size_t stride, size_t outputH, - size_t outputW) { + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { LOG(FATAL) << "Not implemeted"; } /// Pooling backward operation. virtual void maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, Matrix& outGrad, Matrix& outV, size_t sizeX, - int start, size_t stride, size_t outputH, - size_t outputW, real scaleTargets, - real scaleOutput) { + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { LOG(FATAL) << "Not implemeted"; } /// Pooling forward operation, caculate the average of sizeX elements. 
virtual void avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start, - size_t stride, size_t outputH, size_t outputW) { + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW) { LOG(FATAL) << "Not implemeted"; } virtual void avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t sizeX, int start, size_t stride, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput) { + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW) { LOG(FATAL) << "Not implemeted"; } @@ -1131,21 +1137,30 @@ class GpuMatrix : public Matrix { real alpha = 1.0f, real beta = 0.0f); void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start_, size_t stride, - size_t outputH, size_t outputW); + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW); void maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, - Matrix& outGrad, Matrix& outV, size_t sizeX, int start, - size_t stride, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput); + Matrix& outGrad, Matrix& outV, size_t sizeX, + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW); void avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start, size_t stride, - size_t outputH, size_t outputW); + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW); void avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t sizeX, int start, size_t stride, size_t outputH, - size_t outputW, real scaleTargets, real scaleOutput); + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW); void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW, Matrix& denoms, size_t channels, size_t sizeX, @@ -1242,21 +1257,31 @@ class CpuMatrix : public Matrix { real alpha = 1.0f, real beta = 0.0f); void maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start_, size_t stride, - size_t outputH, size_t outputW); + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW); void maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, - Matrix& outGrad, Matrix& outV, size_t sizeX, int start, - size_t stride, size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput); + Matrix& outGrad, Matrix& outV, + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW); void avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t channels, size_t sizeX, int start, size_t stride, - size_t outputH, size_t outputW); + size_t channels, size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + size_t paddingH, size_t paddingW); void avgPoolBackward(Matrix& input, size_t 
imgSizeH, size_t imgSizeW, - size_t sizeX, int start, size_t stride, size_t outputH, - size_t outputW, real scaleTargets, real scaleOutput); + size_t sizeX, size_t sizeY, + size_t strideH, size_t strideW, + size_t outputH, size_t outputW, + real scaleTargets, real scaleOutput, + size_t paddingH, size_t paddingW); void crossMapNormalFwd(Matrix& input, size_t imgSizeH, size_t imgSizeW, Matrix& denoms, size_t channels, size_t sizeX, diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index fe8eacc2efbc5..e1bda79a8acb1 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -1846,6 +1846,159 @@ TEST(Matrix, classificationError) { } } +void testMaxPoolFwdBwd(int numSamples, int channels, + int imgSizeH, int imgSizeW, + int ksizeH, int ksizeW, + int strideH, int strideW, + int padH, int padW) { + int outH = 0, outW = 0; + outH = (imgSizeH - ksizeH + 2 * padH + strideH - 1) / strideH + 1; + outW = (imgSizeW - ksizeW + 2 * padW + strideW - 1) / strideW + 1; + + int inWidth = imgSizeH * imgSizeW * channels; + MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true); + + int outWidth = channels * outH * outW; + MatrixPtr target = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpu = GpuMatrix::create(numSamples, outWidth, false, true); + + input->randomizeUniform(); + target->randomizeUniform(); + inputGpu->copyFrom(*input); + targetGpu->copyFrom(*target); + + target->maxPoolForward(*input, imgSizeH, imgSizeW, + channels, ksizeW, ksizeH, + strideH, strideW, outH, outW, padH, padW); + targetGpu->maxPoolForward(*inputGpu, imgSizeH, imgSizeW, + channels, ksizeW, ksizeH, + strideH, strideW, outH, outW, padH, padW); + MatrixPtr targetCheck = CpuMatrix::create(numSamples, outWidth, false, false); + targetCheck->copyFrom(*targetGpu); + checkMatrixEqual(target, targetCheck); + + MatrixPtr inputGrad = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true); + MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, + false, true); + + inputGrad->randomizeUniform(); + targetGrad->randomizeUniform(); + inputGpuGrad->copyFrom(*inputGrad); + targetGpuGrad->copyFrom(*targetGrad); + + inputGrad->maxPoolBackward(*input, imgSizeH, imgSizeW, + *targetGrad, *target, + ksizeW, ksizeH, + strideH, strideW, + outH, outW, 1.0, 1.0, padH, padW); + inputGpuGrad->maxPoolBackward(*inputGpu, imgSizeH, imgSizeW, + *targetGpuGrad, *targetGpu, + ksizeW, ksizeH, + strideH, strideW, + outH, outW, 1.0, 1.0, padH, padW); + MatrixPtr targetBwdCheck = CpuMatrix::create(numSamples, inWidth, + false, false); + targetBwdCheck->copyFrom(*inputGpuGrad); + checkMatrixEqual(inputGrad, targetBwdCheck); +} + +void testAvgPoolFwdBwd(int numSamples, int channels, + int imgSizeH, int imgSizeW, + int ksizeH, int ksizeW, + int strideH, int strideW, + int padH, int padW) { + int outH = 0, outW = 0; + outH = (imgSizeH - ksizeH + 2 * padH + strideH - 1) / strideH + 1; + outW = (imgSizeW - ksizeW + 2 * padW + strideW - 1) / strideW + 1; + + int inWidth = imgSizeH * imgSizeW * channels; + MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true); + + int outWidth = channels * outH * outW; + MatrixPtr 
target = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpu = GpuMatrix::create(numSamples, outWidth, false, true); + + input->randomizeUniform(); + target->randomizeUniform(); + inputGpu->copyFrom(*input); + targetGpu->copyFrom(*target); + + target->avgPoolForward(*input, imgSizeH, imgSizeW, + channels, ksizeW, ksizeH, + strideH, strideW, outH, outW, padH, padW); + targetGpu->avgPoolForward(*inputGpu, imgSizeH, imgSizeW, + channels, ksizeW, ksizeH, + strideH, strideW, outH, outW, padH, padW); + MatrixPtr targetCheck = CpuMatrix::create(numSamples, outWidth, false, false); + targetCheck->copyFrom(*targetGpu); + MatrixCheckErr(*target, *targetCheck); + + MatrixPtr inputGrad = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true); + MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, + false, true); + + inputGrad->randomizeUniform(); + targetGrad->randomizeUniform(); + inputGpuGrad->copyFrom(*inputGrad); + targetGpuGrad->copyFrom(*targetGrad); + + inputGrad->avgPoolBackward(*targetGrad, imgSizeH, imgSizeW, + ksizeW, ksizeH, + strideH, strideW, + outH, outW, 1.0, 1.0, padH, padW); + inputGpuGrad->avgPoolBackward(*targetGpuGrad, imgSizeH, imgSizeW, + ksizeW, ksizeH, + strideH, strideW, + outH, outW, 1.0, 1.0, padH, padW); + MatrixPtr targetBwdCheck = CpuMatrix::create(numSamples, inWidth, + false, false); + targetBwdCheck->copyFrom(*inputGpuGrad); + MatrixCheckErr(*inputGrad, *targetBwdCheck); +} + +TEST(Matrix, PoolFwdBwd) { + for (auto numSamples : {5, 32}) { + for (auto channels : {1, 9, 32}) { + for (auto imgSizeH : {14, 28}) { + for (auto imgSizeW : {16, 30}) { + for (auto sizeX : {2, 5}) { + for (auto sizeY : {2, 5}) { + for (auto sH : {1, 2}) { + for (auto sW : {1, 2}) { + for (auto pH : {0, (sizeY - 1)/2}) { + for (auto pW : {0, (sizeX - 1)/2}) { + VLOG(3) << " numSamples=" << numSamples + << " channels=" << channels + << " imgSizeH=" << imgSizeH + << " imgSizeW=" << imgSizeW + << " sizeX=" << sizeX + << " sizeY=" << sizeY + << " strideH=" << sH + << " strideW=" << sW + << " padingH=" << pH + << " padingW=" << pW; + testMaxPoolFwdBwd(numSamples, channels, imgSizeH, + imgSizeW, sizeX, sizeY, sH, sW, pH, pW); + testAvgPoolFwdBwd(numSamples, channels, imgSizeH, + imgSizeW, sizeX, sizeY, sH, sW, pH, pW); + } + } + } + } + } + } + } + } + } + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index b32f8b1ee9072..25e36f9c4c168 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -88,7 +88,8 @@ message PoolConfig { required uint32 size_x = 3; // Tell the net where in the input image to start the pooling. - required uint32 start = 4; + // start is deprecated now. + optional uint32 start = 4; // Defines the stride size between successive pooling squares. 
required uint32 stride = 5; diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 1f55298f24f07..fb47fd0c6f0c3 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -961,10 +961,6 @@ def parse_pool(pool, input_layer_name, pool_conf): "['max-projection', 'avg-projection', " "'cudnn-max-pool', 'cudnn-avg-pool']" % pool.pool_type) - if pool.size_y or pool.stride_y or pool.img_width or pool.padding_y: - config_assert(pool.pool_type.startswith('cudnn'), - "'size_y', 'stride_y' and 'img_width' and 'padding_y'" - "can only be used for cudnn") pool_conf.channels = pool.channels pool_conf.size_x = pool.size_x @@ -974,36 +970,25 @@ def parse_pool(pool, input_layer_name, pool_conf): pool_conf.stride_y = default(pool.stride_y, pool_conf.stride); img_pixels = g_layer_map[input_layer_name].size / pool.channels + # the img_width may be removed, + # and it can be calculated automatically later. pool_conf.img_size = default(pool.img_width, int(img_pixels ** 0.5)) pool_conf.img_size_y = img_pixels / pool_conf.img_size config_assert(pool_conf.img_size * pool_conf.img_size_y == img_pixels, "Incorrect input image size %d for input image pixels %d" % (pool_conf.img_size, img_pixels)) - if pool.start is not None: - config_assert(pool.padding is None, - 'At most one of start and padding can be set.') - pool_conf.start = pool.start - pool_conf.padding = 0 - pool_conf.output_x = int(math.ceil((pool_conf.img_size - \ - pool_conf.start - pool_conf.size_x) / \ - float(pool_conf.stride))) + 1 + config_assert(not pool.start, "start is deprecated in pooling.") - pool_conf.output_y = int(math.ceil((pool_conf.img_size_y - \ - pool_conf.start - pool_conf.size_y) / \ - float(pool_conf.stride_y))) + 1 - elif pool.padding is not None: + if pool.padding is not None: pool_conf.padding = pool.padding pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) - pool_conf.start = 0 pool_conf.output_x = int(math.ceil((pool_conf.img_size + \ 2*pool_conf.padding - pool_conf.size_x) / \ float(pool_conf.stride))) + 1 pool_conf.output_y = int(math.ceil((pool_conf.img_size_y + \ 2*pool_conf.padding_y - pool_conf.size_y) / \ float(pool_conf.stride_y))) + 1 - else: - raise ValueError('At least one of start and padding should be set.') def parse_image(image, input_layer_name, image_conf): image_conf.channels = image.channels @@ -1603,7 +1588,7 @@ def __init__( pool_conf = self.config.inputs[input_index].pool_conf print("output size for %s is %d*%d " % ( name, pool_conf.output_y, pool_conf.output_x)) - self.set_layer_size((pool_conf.output_x ** 2) * pool_conf.channels) + self.set_layer_size((pool_conf.output_x * pool_conf.output_y) * pool_conf.channels) @config_layer('batch_norm') class BatchNormLayer(LayerBase): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 47db197f422ea..5e7e66a908ee0 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -210,7 +210,7 @@ def __str__(self): def layer_support(*attrs): - attrs_list = list(attrs) + attrs_list = list(attrs) attrs_list.append(DEVICE) def decorator(method): @functools.wraps(method) @@ -1627,7 +1627,9 @@ def img_conv_layer(input, filter_size, num_filters, @layer_support() def img_pool_layer(input, pool_size, name=None, num_channels=None, pool_type=None, - stride=1, start=None, padding=0, layer_attr=None): + stride=1, start=None, padding=0, layer_attr=None, + 
pool_size_y=None, stride_y=None, padding_y=None, + img_width=None): """ Image pooling Layer. @@ -1635,25 +1637,34 @@ def img_pool_layer(input, pool_size, name=None, .. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/ - :param padding: pooling padding + :param padding: pooling padding width. :type padding: int + :param padding_y: pooling padding height. It's equal to padding by default. + :type padding_y: int|None :param name: name of pooling layer :type name: basestring. :param input: layer's input :type input: LayerOutput - :param pool_size: pooling size + :param pool_size: pooling window width :type pool_size: int + :param pool_size_y: pooling window height. It's eaqual to pool_size by default. + :type pool_size_y: int|None :param num_channels: number of input channel. :type num_channels: int :param pool_type: pooling type. MaxPooling or AveragePooling. Default is MaxPooling. :type pool_type: BasePoolingType - :param stride: stride of pooling. + :param stride: stride width of pooling. :type stride: int - :param start: start position of pooling operation. - :type start: int + :param stride_y: stride height of pooling. It is equal to stride by default. + :type stride_y: int|None + :param start: start position of pooling operation. Note it is deprecated now. + :type start: int|None :param layer_attr: Extra Layer attribute. :type layer_attr: ExtraLayerAttribute + :param img_width: the width of input feature map. If it is None, the input feature + map should be square. + :type img_width: int|None :return: LayerOutput object. :rtype: LayerOutput """ @@ -1666,17 +1677,29 @@ def img_pool_layer(input, pool_size, name=None, elif isinstance(pool_type, AvgPooling): pool_type.name = 'avg' + type_name = pool_type.name + '-projection' \ + if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)) \ + else pool_type.name + + pool_size_y = pool_size if pool_size_y is None else pool_size_y + stride_y = stride if stride_y is None else stride_y + padding_y = padding if padding_y is None else padding_y + Layer( name=name, type=LayerType.POOL_LAYER, inputs=[Input(input.name, pool=Pool( - pool_type=''.join([pool_type.name, '-projection']), + pool_type=type_name, channels=num_channels, size_x=pool_size, start=start, stride=stride, - padding=padding + padding=padding, + size_y=pool_size_y, + stride_y=stride_y, + padding_y=padding_y, + img_width=img_width ))], **ExtraLayerAttribute.to_kwargs(layer_attr) ) @@ -2751,7 +2774,7 @@ def __real_step__(*args): tmp = recurrent_group(step=__real_step__, input=real_input, reverse=False, name=name) - + return tmp diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index e59e93acbe33a..ab4057d9d6c6b 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -170,13 +170,13 @@ def simple_img_conv_pool(input, filter_size, num_filters, pool_size, name=None, :type shared_bias: bool :param conv_layer_attr: see img_conv_layer for details :type conv_layer_attr: ExtraLayerAttribute - :param pool_stride: see img_conv_layer for details + :param pool_stride: see img_pool_layer for details :type pool_stride: int - :param pool_start: see img_conv_layer for details + :param pool_start: see img_pool_layer for details. It is deprecated now. 
:type pool_start: int - :param pool_padding: see img_conv_layer for details + :param pool_padding: see img_pool_layer for details :type pool_padding: int - :param pool_layer_attr: see img_conv_layer for details + :param pool_layer_attr: see img_pool_layer for details :type pool_layer_attr: ExtraLayerAttribute :return: Layer's output :rtype: LayerOutput @@ -243,7 +243,7 @@ def img_conv_bn_pool(input, filter_size, num_filters, pool_size, name=None, :param bn_layer_attr: ParameterAttribute. :param pool_stride: see img_pool_layer's document. :type pool_stride: int - :param pool_start: see img_pool_layer's document. + :param pool_start: see img_pool_layer's document. It is deprecated now. :type pool_start: int :param pool_padding: see img_pool_layer's document. :type pool_padding: int @@ -555,7 +555,7 @@ def lstmemory_unit(input, name=None, size=None, param_attr=None, :type gate_act: BaseActivation :param state_act: lstm state activiation type. :type state_act: BaseActivation - :param mixed_bias_attr: bias parameter attribute of mixed layer. + :param mixed_bias_attr: bias parameter attribute of mixed layer. False means no bias, None means default bias. :type mixed_bias_attr: ParameterAttribute|False :param lstm_bias_attr: bias parameter attribute of lstm layer. diff --git a/python/paddle/trainer_config_helpers/poolings.py b/python/paddle/trainer_config_helpers/poolings.py index d627daab0c496..3d2320f3ffc42 100644 --- a/python/paddle/trainer_config_helpers/poolings.py +++ b/python/paddle/trainer_config_helpers/poolings.py @@ -19,6 +19,8 @@ "BasePoolingType", "MaxPooling", "AvgPooling", + "CudnnMaxPooling", + "CudnnAvgPooling", "SumPooling", "SquareRootNPooling" ] @@ -26,7 +28,7 @@ class BasePoolingType(object): """ - Base Pooling Type. + Base Pooling Type. Note these pooling types are used for sequence input, not for images. Each PoolingType contains one parameter: @@ -55,7 +57,24 @@ class MaxPooling(BasePoolingType): def __init__(self, output_max_index=None): BasePoolingType.__init__(self, "max") self.output_max_index = output_max_index - + + +class CudnnMaxPooling(BasePoolingType): + """ + Cudnn max pooling only support GPU. Return the maxinum value in the + pooling window. + """ + def __init__(self): + BasePoolingType.__init__(self, "cudnn-max-pool") + + +class CudnnAvgPooling(BasePoolingType): + """ + Cudnn average pooling only support GPU. Return the average value in the + pooling window. 
+ """ + def __init__(self): + BasePoolingType.__init__(self, "cudnn-avg-pool") class AvgPooling(BasePoolingType): """ diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 29928b6f7b423..359652f3d09c7 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -1,4 +1,4 @@ -7e6919d17562516e9a1d9a88de1fb3b9 img_layers.protostr +86c0815275a9d5eb902e23c6a592f58a img_layers.protostr a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 9c038249ec8ff719753a746cdb04c026 layer_activations.protostr 5913f87b39cee3b2701fa158270aca26 projections.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py index 6c8ba8be846e5..f33357c3906fd 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/img_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/img_layers.py @@ -7,8 +7,10 @@ img = data_layer(name='image', size=256*256) +# the parse_conv in config_parse.py is not strictly accurate when filter_size +# is not square. So here set square filter_size. img_conv = img_conv_layer(input=img, num_channels=1, num_filters=64, - filter_size=(32, 64), padding=(1, 0), stride=(1, 1), + filter_size=(32, 32), padding=(1, 1), stride=(1, 1), act=LinearActivation()) img_bn = batch_norm_layer(input=img_conv, act=ReluActivation()) @@ -17,4 +19,4 @@ img_pool = img_pool_layer(input=img_conv, pool_size=32, pool_type=MaxPooling()) -outputs(img_pool, img_norm) \ No newline at end of file +outputs(img_pool, img_norm) From 9e11ca8096a57cda6d91741c064b362180ff2a50 Mon Sep 17 00:00:00 2001 From: gangliao Date: Mon, 10 Oct 2016 10:15:07 +0800 Subject: [PATCH 014/180] Use C++ 11 atomic_flag in MacOS as spin lock (#175) * Use C++ 11 atomic_flag in MacOS as spin lock * Add unittest for it. --- paddle/trainer/tests/test_CompareSparse.cpp | 2 +- paddle/utils/arch/osx/Locks.cpp | 24 ++------- paddle/utils/tests/CMakeLists.txt | 1 + paddle/utils/tests/test_SpinLock.cpp | 57 +++++++++++++++++++++ 4 files changed, 63 insertions(+), 21 deletions(-) create mode 100644 paddle/utils/tests/test_SpinLock.cpp diff --git a/paddle/trainer/tests/test_CompareSparse.cpp b/paddle/trainer/tests/test_CompareSparse.cpp index ff37d7b364840..311dd333a1b16 100644 --- a/paddle/trainer/tests/test_CompareSparse.cpp +++ b/paddle/trainer/tests/test_CompareSparse.cpp @@ -57,7 +57,7 @@ std::vector trainerOnePassTest(const string& configFile, << " sparseUpdate=" << sparseUpdate; srand(FLAGS_seed); *ThreadLocalRand::getSeed() = FLAGS_seed; - + ThreadLocalRandomEngine::get().seed(FLAGS_seed); if (useGpu) { CHECK_LE(trainerCount, gNumDevices); } diff --git a/paddle/utils/arch/osx/Locks.cpp b/paddle/utils/arch/osx/Locks.cpp index 44bab7198d5f4..b3ec454976520 100644 --- a/paddle/utils/arch/osx/Locks.cpp +++ b/paddle/utils/arch/osx/Locks.cpp @@ -15,12 +15,9 @@ limitations under the License. 
*/ #include "paddle/utils/Locks.h" #include "paddle/utils/Logging.h" #include +#include #include -#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 -#include -#endif - namespace paddle { class SemaphorePrivate { @@ -55,12 +52,7 @@ void Semaphore::post() { class SpinLockPrivate { public: -#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 - os_unfair_lock lock_; -#else - SpinLockPrivate(): lock_(OS_SPINLOCK_INIT) {} - OSSpinLock lock_; -#endif + std::atomic_flag lock_ = ATOMIC_FLAG_INIT; char padding_[64 - sizeof(lock_)]; // Padding to cache line size }; @@ -68,19 +60,11 @@ SpinLock::SpinLock(): m(new SpinLockPrivate()) {} SpinLock::~SpinLock() { delete m; } void SpinLock::lock() { -#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 - os_unfair_lock_lock(&m->lock_); -#else - OSSpinLockLock(&m->lock_); -#endif + while (m->lock_.test_and_set(std::memory_order_acquire)) {} } void SpinLock::unlock() { -#if MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_12 - os_unfair_lock_unlock(&m->lock_); -#else - OSSpinLockUnlock(&m->lock_); -#endif + m->lock_.clear(std::memory_order_release); } diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/utils/tests/CMakeLists.txt index 51f1889392845..adf489fafe722 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/utils/tests/CMakeLists.txt @@ -4,6 +4,7 @@ add_simple_unittest(test_Thread) add_simple_unittest(test_StringUtils) add_simple_unittest(test_CustomStackTrace) add_simple_unittest(test_ThreadBarrier) +add_simple_unittest(test_SpinLock) add_executable( test_CustomStackTracePrint diff --git a/paddle/utils/tests/test_SpinLock.cpp b/paddle/utils/tests/test_SpinLock.cpp new file mode 100644 index 0000000000000..ebc84e0f52d82 --- /dev/null +++ b/paddle/utils/tests/test_SpinLock.cpp @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include "paddle/utils/Logging.h" +#include "paddle/utils/CommandLineParser.h" +#include "paddle/utils/Util.h" +#include "paddle/utils/Locks.h" + +P_DEFINE_int32(test_thread_num, 100, "testing thread number"); + +void testNormalImpl(size_t thread_num, const std::function + & callback) { + paddle::SpinLock mutex; + std::vector threads; + threads.reserve(thread_num); + + size_t count = 0; + for (size_t i = 0; i < thread_num; ++i) { + threads.emplace_back([&thread_num, &count, &mutex, &callback]{ + callback(thread_num, count, mutex); + }); + } + for (auto& thread : threads) { + thread.join(); + } + // Check whether all threads reach this point or not + CHECK_EQ(count, thread_num); +} + +TEST(ThreadSpinLock, normalTest) { + for (auto &thread_num : {10, 30, 50 , 100 , 300, 1000}) { + testNormalImpl(thread_num, [](size_t thread_num, + size_t& count, paddle::SpinLock& mutex){ + std::lock_guard lock(mutex); + ++count; + }); + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + paddle::initMain(argc, argv); + return RUN_ALL_TESTS(); +} From 86bb5ef1693c601fd72fc0d5f16a239cb9f8f68a Mon Sep 17 00:00:00 2001 From: QiJune Date: Mon, 10 Oct 2016 10:18:07 +0800 Subject: [PATCH 015/180] Read git sha1 when building Paddle, and add it to PADDLE_VERSION macro * save the model file including git sha1 --- CMakeLists.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index af6a13efbde9e..44e93f22c0eaf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,6 +65,19 @@ else() Subversion_WC_INFO(${PROJ_ROOT} Project) add_definitions(-DPADDLE_VERSION=${Project_WC_REVISION}) endif() + elseif(EXISTS ${PROJ_ROOT}/.git/) + find_package(Git REQUIRED) + execute_process( + COMMAND ${GIT_EXECUTABLE} log -1 --format=%H + WORKING_DIRECTORY ${PROJ_ROOT} + OUTPUT_VARIABLE GIT_SHA1 + RESULT_VARIABLE GIT_RESULT + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + if(NOT ${GIT_RESULT}) + add_definitions(-DPADDLE_VERSION=\"${GIT_SHA1}\") + else() + message(WARNING "Cannot add paddle version from git tag") + endif() endif() endif() From 199a6a4b5c62583d05d4c3199a13891dcba576c5 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Mon, 10 Oct 2016 11:21:44 +0800 Subject: [PATCH 016/180] add weight for cost layer interface (#177) --- .../paddle/trainer_config_helpers/layers.py | 44 +++++++++++++++---- .../tests/configs/check.md5 | 1 + .../tests/configs/generate_protostr.sh | 2 +- .../configs/test_cost_layers_with_weight.py | 14 ++++++ 4 files changed, 51 insertions(+), 10 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 5e7e66a908ee0..7699c90db716c 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -2777,29 +2777,49 @@ def __real_step__(*args): return tmp +def __cost_input__(input, label, weight=None): + """ + inputs and parents for cost layers. + """ + ipts = [Input(input.name), Input(label.name)] + parents = [input, label] + if weight is not None: + assert weight.layer_type == LayerType.DATA + ipts.append(Input(weight.name)) + parents.append(weight) + return ipts, parents + @wrap_name_default() -def regression_cost(input, label, cost='square_error', name=None): +def regression_cost(input, label, weight=None, cost='square_error', name=None): """ Regression Layer. 
TODO(yuyang18): Complete this method. :param name: layer name. + :type name: basestring :param input: Network prediction. + :type input: LayerOutput :param label: Data label. + :type label: LayerOutput + :param weight: The weight affects the cost, namely the scale of cost. + It is an optional argument. + :type weight: LayerOutput :param cost: Cost method. + :type cost: basestring :return: LayerOutput object. + :rtype: LayerOutput """ - Layer(inputs=[Input(input.name), Input(label.name)], type=cost, name=name) - return LayerOutput( - name, LayerType.COST, parents=[input, label] - ) + ipts, parents = __cost_input__(input, label, weight) + + Layer(inputs=ipts, type=cost, name=name) + return LayerOutput(name, LayerType.COST, parents=parents) @wrap_name_default("cost") @layer_support() -def classification_cost(input, label, name=None, +def classification_cost(input, label, weight=None, name=None, cost="multi-class-cross-entropy", evaluator=classification_error_evaluator, layer_attr=None): @@ -2812,6 +2832,9 @@ def classification_cost(input, label, name=None, :type input: LayerOutput :param label: label layer name. data_layer often. :type label: LayerOutput + :param weight: The weight affects the cost, namely the scale of cost. + It is an optional argument. + :type weight: LayerOutput :param cost: cost method. :type cost: basestring :param evaluator: Evaluator method. @@ -2823,7 +2846,10 @@ def classification_cost(input, label, name=None, assert input.layer_type != LayerType.DATA assert isinstance(input.activation, SoftmaxActivation) assert label.layer_type == LayerType.DATA - Layer(name=name, type=cost, inputs=[Input(input.name), Input(label.name)], + + ipts, parents = __cost_input__(input, label, weight) + + Layer(name=name, type=cost, inputs=ipts, **ExtraLayerAttribute.to_kwargs(layer_attr)) def __add_evaluator__(e): @@ -2835,7 +2861,7 @@ def __add_evaluator__(e): assert isinstance(e.for_classification, bool) assert e.for_classification - e(name=e.__name__, input=input, label=label) + e(name=e.__name__, input=input, label=label, weight=weight) if not isinstance(evaluator, collections.Sequence): evaluator = [evaluator] @@ -2843,7 +2869,7 @@ def __add_evaluator__(e): for each_evaluator in evaluator: __add_evaluator__(each_evaluator) - return LayerOutput(name, LayerType.COST, parents=[input, label]) + return LayerOutput(name, LayerType.COST, parents=parents) def conv_operator(img, filter, filter_size, num_filters, diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 359652f3d09c7..3ecfff2071630 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -4,6 +4,7 @@ a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 5913f87b39cee3b2701fa158270aca26 projections.protostr 6b39e34beea8dfb782bee9bd3dea9eb5 simple_rnn_layers.protostr 0fc1409600f1a3301da994ab9d28b0bf test_cost_layers.protostr +6cd5f28a3416344f20120698470e0a4c test_cost_layers_with_weight.protostr 144bc6d3a509de74115fa623741797ed test_expand_layer.protostr 2378518bdb71e8c6e888b1842923df58 test_fc.protostr 8bb44e1e5072d0c261572307e7672bda test_grumemory_layer.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index fc2acbd41ed90..5514ee65e5a62 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ 
b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -8,7 +8,7 @@ configs=(test_fc layer_activations projections test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers -test_rnn_group) +test_cost_layers_with_weight test_rnn_group) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py new file mode 100644 index 0000000000000..29749cbb66637 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.py @@ -0,0 +1,14 @@ +from paddle.trainer_config_helpers import * + +settings( + learning_rate=1e-4, + batch_size=1000 +) + +data = data_layer(name='input', size=300) +lbl = data_layer(name='label', size=1) +wt = data_layer(name='weight', size=1) +fc = fc_layer(input=data, size=10, act=SoftmaxActivation()) + +outputs(classification_cost(input=fc, label=lbl, weight=wt), + regression_cost(input=fc, label=lbl, weight=wt)) From 9f244e4a392c3d528572413d4969c7265150dc50 Mon Sep 17 00:00:00 2001 From: Mark Date: Mon, 10 Oct 2016 13:14:26 +0800 Subject: [PATCH 017/180] Should not compile the two files if -DWITH_AVX=OFF. (#163) * If cmake -DWITH_AVX=OFF during configuration, should not compile the file src/hl_math.cc and src/hl_avx_functions.cc. --- paddle/cuda/CMakeLists.txt | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) mode change 100644 => 100755 paddle/cuda/CMakeLists.txt diff --git a/paddle/cuda/CMakeLists.txt b/paddle/cuda/CMakeLists.txt old mode 100644 new mode 100755 index e03a9a1baa004..cdb730bb3cec7 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/cuda/CMakeLists.txt @@ -2,10 +2,17 @@ set(AVX_SOURCES src/hl_math.cc src/hl_avx_functions.cc ) -set(CUDA_SOURCES - src/hl_time.cc - src/hl_cpu_functions.cc - ${AVX_SOURCES}) + +if(WITH_AVX) + set(CUDA_SOURCES + src/hl_time.cc + src/hl_cpu_functions.cc + ${AVX_SOURCES}) +else() + set(CUDA_SOURCES + src/hl_time.cc + src/hl_cpu_functions.cc) +endif() set(CUDA_CXX_WITH_GPU_SOURCES src/hl_cuda_cublas.cc From 3edc4202b0005ea9cbdf6fac03347bd5d560887c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 11 Oct 2016 15:20:40 +0800 Subject: [PATCH 018/180] Add travis for osx (#189) --- .travis.yml | 11 +++++++++- paddle/.set_python_path.sh | 2 +- ...ore_install.sh => before_install.linux.sh} | 0 paddle/scripts/travis/before_install.osx.sh | 13 ++++++++++++ paddle/scripts/travis/build_and_test.sh | 21 ++++++++++++++++--- 5 files changed, 42 insertions(+), 5 deletions(-) rename paddle/scripts/travis/{before_install.sh => before_install.linux.sh} (100%) create mode 100755 paddle/scripts/travis/before_install.osx.sh diff --git a/.travis.yml b/.travis.yml index d3dae9efd416b..119d01a4fa8fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,9 +2,17 @@ language: cpp cache: ccache sudo: required dist: trusty +os: + - linux + - osx env: - JOB=DOCS - JOB=BUILD_AND_TEST +matrix: + exclude: + - os: osx + env: JOB=DOCS # Only generate documentation in linux + addons: apt: packages: @@ -28,8 +36,9 @@ addons: - libgflags-dev - libgtest-dev before_install: + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi + - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - pip install wheel protobuf sphinx breathe 
recommonmark - - sudo paddle/scripts/travis/before_install.sh script: - paddle/scripts/travis/main.sh notifications: diff --git a/paddle/.set_python_path.sh b/paddle/.set_python_path.sh index f7019b27f8f02..657fdf65e92c9 100755 --- a/paddle/.set_python_path.sh +++ b/paddle/.set_python_path.sh @@ -33,7 +33,7 @@ if ! python -c "import paddle" >/dev/null 2>/dev/null; then esac done shift $(($OPTIND - 1)) - export PYTHONPATH=$PYPATH + export PYTHONPATH=$PYPATH:$PYTHONPATH $@ else echo "paddle package is already in your PYTHONPATH. But unittest need a clean environment." diff --git a/paddle/scripts/travis/before_install.sh b/paddle/scripts/travis/before_install.linux.sh similarity index 100% rename from paddle/scripts/travis/before_install.sh rename to paddle/scripts/travis/before_install.linux.sh diff --git a/paddle/scripts/travis/before_install.osx.sh b/paddle/scripts/travis/before_install.osx.sh new file mode 100755 index 0000000000000..f438e69b822aa --- /dev/null +++ b/paddle/scripts/travis/before_install.osx.sh @@ -0,0 +1,13 @@ +#!/bin/bash +brew update +brew tap homebrew/science +brew install python +sudo pip install --upgrade protobuf==2.6.0 +brew install homebrew/versions/protobuf260 --without-python +brew install cmake python glog gflags openblas wget md5sha1sum + +wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz -O gtest.tar.gz +tar xf gtest.tar.gz +cd googletest-release-1.8.0/ +cmake . +make install diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index 3ea633be32702..a73c32344c8ab 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -1,7 +1,22 @@ #!/bin/bash source ./common.sh -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -make -j `nproc` -env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j `nproc`" +CMAKE_EXTRA="" +if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + CMAKE_EXTRA="-DPYTHON_LIBRARY=/usr/local/Cellar/python/2.7.12_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config/libpython2.7.dylib" +fi + + +cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON ${CMAKE_EXTRA} + +NPROC=1 +if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then + NRPOC=`nproc` +elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + NPROC=`sysctl -n hw.ncpu` +fi + + +make -j $NPROC +env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" sudo make install sudo paddle version From 6f0d634e02646ec05eef5de4f8643d97da0e2845 Mon Sep 17 00:00:00 2001 From: backyes Date: Tue, 11 Oct 2016 16:49:29 +0800 Subject: [PATCH 019/180] set MKL search path with intel64 (#188) --- cmake/cblas.cmake | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 529b4b9d15d09..57c32a54cd727 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -17,10 +17,17 @@ ## Find MKL First. 
set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains MKL") -find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT}/include) -find_library(MKL_CORE_LIB NAMES mkl_core PATHS ${MKL_ROOT}/lib) -find_library(MKL_SEQUENTIAL_LIB NAMES mkl_sequential PATHS ${MKL_ROOT}/lib) -find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS ${MKL_ROOT}/lib) +find_path(MKL_INCLUDE_DIR mkl.h PATHS + ${MKL_ROOT}/include) +find_library(MKL_CORE_LIB NAMES mkl_core PATHS + ${MKL_ROOT}/lib + ${MKL_ROOT}/lib/intel64) +find_library(MKL_SEQUENTIAL_LIB NAMES mkl_sequential PATHS + ${MKL_ROOT}/lib + ${MKL_ROOT}/lib/intel64) +find_library(MKL_INTEL_LP64 NAMES mkl_intel_lp64 PATHS + ${MKL_ROOT}/lib + ${MKL_ROOT}/lib/intel64) if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64) From e26f220df81546d360e7759b3d96a2aa27d06ffc Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 11 Oct 2016 21:29:52 -0700 Subject: [PATCH 020/180] Mnist demo (#162) * added mnist demo * modified .gitignore for .project files * normalize pixel in mnist_provider.py and set use_gpu=0 --- .gitignore | 4 ++- demo/mnist/.gitignore | 6 ++++ demo/mnist/data/generate_list.py | 21 +++++++++++++ demo/mnist/data/get_mnist_data.sh | 22 +++++++++++++ demo/mnist/mnist_provider.py | 33 ++++++++++++++++++++ demo/mnist/train.sh | 31 ++++++++++++++++++ demo/mnist/vgg_16_mnist.py | 52 +++++++++++++++++++++++++++++++ 7 files changed, 168 insertions(+), 1 deletion(-) create mode 100644 demo/mnist/.gitignore create mode 100644 demo/mnist/data/generate_list.py create mode 100644 demo/mnist/data/get_mnist_data.sh create mode 100644 demo/mnist/mnist_provider.py create mode 100755 demo/mnist/train.sh create mode 100644 demo/mnist/vgg_16_mnist.py diff --git a/.gitignore b/.gitignore index 7e21ba0b750df..65ba217de37c8 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,6 @@ build/ *.user .vscode -.idea \ No newline at end of file +.idea +.project +.pydevproject diff --git a/demo/mnist/.gitignore b/demo/mnist/.gitignore new file mode 100644 index 0000000000000..810910fd5ca56 --- /dev/null +++ b/demo/mnist/.gitignore @@ -0,0 +1,6 @@ +data/raw_data +data/*.list +mnist_vgg_model +plot.png +train.log +*pyc diff --git a/demo/mnist/data/generate_list.py b/demo/mnist/data/generate_list.py new file mode 100644 index 0000000000000..1b929048b4d82 --- /dev/null +++ b/demo/mnist/data/generate_list.py @@ -0,0 +1,21 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +o = open("./" + "train.list", "w") +o.write("./data/raw_data/train" +"\n") +o.close() + +o = open("./" + "test.list", "w") +o.write("./data/raw_data/t10k" +"\n") +o.close() \ No newline at end of file diff --git a/demo/mnist/data/get_mnist_data.sh b/demo/mnist/data/get_mnist_data.sh new file mode 100644 index 0000000000000..c3ef99445049d --- /dev/null +++ b/demo/mnist/data/get_mnist_data.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env sh +# This scripts downloads the mnist data and unzips it. 
+ +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +rm -rf "$DIR/raw_data" +mkdir "$DIR/raw_data" +cd "$DIR/raw_data" + +echo "Downloading..." + +for fname in train-images-idx3-ubyte train-labels-idx1-ubyte t10k-images-idx3-ubyte t10k-labels-idx1-ubyte +do + if [ ! -e $fname ]; then + wget --no-check-certificate http://yann.lecun.com/exdb/mnist/${fname}.gz + gunzip ${fname}.gz + fi +done + +cd $DIR +rm -f *.list +python generate_list.py + diff --git a/demo/mnist/mnist_provider.py b/demo/mnist/mnist_provider.py new file mode 100644 index 0000000000000..0f14ded2dce93 --- /dev/null +++ b/demo/mnist/mnist_provider.py @@ -0,0 +1,33 @@ +from paddle.trainer.PyDataProvider2 import * + + +# Define a py data provider +@provider(input_types=[ + dense_vector(28 * 28), + integer_value(10) +]) +def process(settings, filename): # settings is not used currently. + imgf = filename + "-images-idx3-ubyte" + labelf = filename + "-labels-idx1-ubyte" + f = open(imgf, "rb") + l = open(labelf, "rb") + + f.read(16) + l.read(8) + + # Define number of samples for train/test + if "train" in filename: + n = 60000 + else: + n = 10000 + + for i in range(n): + label = ord(l.read(1)) + pixels = [] + for j in range(28*28): + pixels.append(float(ord(f.read(1))) / 255.0) + yield { "pixel": pixels, 'label': label } + + f.close() + l.close() + \ No newline at end of file diff --git a/demo/mnist/train.sh b/demo/mnist/train.sh new file mode 100755 index 0000000000000..084b32ac390b8 --- /dev/null +++ b/demo/mnist/train.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e +config=vgg_16_mnist.py +output=./mnist_vgg_model +log=train.log + +paddle train \ +--config=$config \ +--dot_period=10 \ +--log_period=100 \ +--test_all_data_in_one_period=1 \ +--use_gpu=0 \ +--trainer_count=1 \ +--num_passes=100 \ +--save_dir=$output \ +2>&1 | tee $log + +python -m paddle.utils.plotcurve -i $log > plot.png diff --git a/demo/mnist/vgg_16_mnist.py b/demo/mnist/vgg_16_mnist.py new file mode 100644 index 0000000000000..ad0a4de3215ca --- /dev/null +++ b/demo/mnist/vgg_16_mnist.py @@ -0,0 +1,52 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from paddle.trainer_config_helpers import * + +is_predict = get_config_arg("is_predict", bool, False) + +####################Data Configuration ################## + + +if not is_predict: + data_dir='./data/' + define_py_data_sources2(train_list= data_dir + 'train.list', + test_list= data_dir + 'test.list', + module='mnist_provider', + obj='process') + +######################Algorithm Configuration ############# +settings( + batch_size = 128, + learning_rate = 0.1 / 128.0, + learning_method = MomentumOptimizer(0.9), + regularization = L2Regularization(0.0005 * 128) +) + +#######################Network Configuration ############# + +data_size=1*28*28 +label_size=10 +img = data_layer(name='pixel', size=data_size) + +# small_vgg is predined in trainer_config_helpers.network +predict = small_vgg(input_image=img, + num_channels=1, + num_classes=label_size) + +if not is_predict: + lbl = data_layer(name="label", size=label_size) + outputs(classification_cost(input=predict, label=lbl)) +else: + outputs(predict) From 43f7d7b7684b8c4cee4e396f57c4c841f41b2dbe Mon Sep 17 00:00:00 2001 From: luotao1 Date: Thu, 13 Oct 2016 15:11:52 +0800 Subject: [PATCH 021/180] add interface and unittest for nce layer (#180) * add interface and unittest for nce layer * follow comments --- doc/ui/api/trainer_config_helpers/layers.rst | 6 + paddle/gserver/layers/NCELayer.cpp | 13 +- paddle/trainer/tests/test_config.conf | 222 ++++++------------ .../paddle/trainer_config_helpers/layers.py | 89 ++++++- 4 files changed, 170 insertions(+), 160 deletions(-) diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index c1d7a7ce81530..5271262d20d55 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -371,6 +371,12 @@ ctc_layer :members: ctc_layer :noindex: +nce_layer +----------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: nce_layer + :noindex: + hsigmoid --------- .. automodule:: paddle.trainer_config_helpers.layers diff --git a/paddle/gserver/layers/NCELayer.cpp b/paddle/gserver/layers/NCELayer.cpp index a896e16a6027b..4faebe5d2ad6f 100644 --- a/paddle/gserver/layers/NCELayer.cpp +++ b/paddle/gserver/layers/NCELayer.cpp @@ -21,14 +21,18 @@ limitations under the License. */ namespace paddle { /** - * Noise-contrastive estimation + * Noise-contrastive estimation. * Implements the method in the following paper: - * A fast and simple algorithm for training neural probabilistic language models + * A fast and simple algorithm for training neural probabilistic language models. + * + * The config file api is nce_layer. 
*/ class NCELayer : public Layer { int numClasses_; - int numInputs_; // number of input layer besides labelLayer and weightLayer + /// number of input layer besides labelLayer and weightLayer + int numInputs_; LayerPtr labelLayer_; + /// weight layer, can be None LayerPtr weightLayer_; WeightList weights_; std::unique_ptr biases_; @@ -43,7 +47,8 @@ class NCELayer : public Layer { real weight; }; std::vector samples_; - bool prepared_; // whether samples_ is prepared + /// whether samples_ is prepared + bool prepared_; Argument sampleOut_; IVectorPtr labelIds_; diff --git a/paddle/trainer/tests/test_config.conf b/paddle/trainer/tests/test_config.conf index 5d2e2ba9df5c7..664e18cb98681 100644 --- a/paddle/trainer/tests/test_config.conf +++ b/paddle/trainer/tests/test_config.conf @@ -13,157 +13,71 @@ # See the License for the specific language governing permissions and # limitations under the License. -#Todo(luotao02) This config is only used for unitest. It is out of date now, and will be updated later. - -default_initial_std(0.5) - -model_type("nn") - -DataLayer( - name = "input", - size = 3, -) - -DataLayer( - name = "weight", - size = 1, -) - -Layer( - name = "layer1_1", - type = "fc", - size = 5, - active_type = "sigmoid", - inputs = "input", -) - -Layer( - name = "layer1_2", - type = "fc", - size = 12, - active_type = "linear", - inputs = Input("input", parameter_name='sharew'), -) - -Layer( - name = "layer1_3", - type = "fc", - size = 3, - active_type = "tanh", - inputs = "input", -) - -Layer( - name = "layer1_5", - type = "fc", - size = 3, - active_type = "tanh", - inputs = Input("input", - learning_rate=0.01, - momentum=0.9, - decay_rate=0.05, - initial_mean=0.0, - initial_std=0.01, - format = "csc", - nnz = 4) -) - -FCLayer( - name = "layer1_4", - size = 5, - active_type = "square", - inputs = "input", - drop_rate = 0.5, -) - -Layer( - name = "pool", - type = "pool", - inputs = Input("layer1_2", - pool = Pool(pool_type="cudnn-avg-pool", - channels = 1, - size_x = 2, - size_y = 3, - img_width = 3, - padding = 1, - padding_y = 2, - stride = 2, - stride_y = 3)) -) - -Layer( - name = "concat", - type = "concat", - inputs = ["layer1_3", "layer1_4"], -) - -MixedLayer( - name = "output", - size = 3, - active_type = "softmax", - inputs = [ - FullMatrixProjection("layer1_1", - learning_rate=0.1), - TransposedFullMatrixProjection("layer1_2", parameter_name='sharew'), - FullMatrixProjection("concat"), - IdentityProjection("layer1_3"), - ], -) - -Layer( - name = "label", - type = "data", - size = 1, -) - -Layer( - name = "cost", - type = "multi-class-cross-entropy", - inputs = ["output", "label", "weight"], -) - -Layer( - name = "cost2", - type = "nce", - num_classes = 3, - active_type = "sigmoid", - neg_sampling_dist = [0.1, 0.3, 0.6], - inputs = ["layer1_2", "label", "weight"], -) - -Evaluator( - name = "error", - type = "classification_error", - inputs = ["output", "label", "weight"] -) - -Inputs("input", "label", "weight") -Outputs("cost", "cost2") - -TrainData( - ProtoData( - files = "dummy_list", - constant_slots = [1.0], - async_load_data = True, - ) -) - -TestData( - SimpleData( - files = "trainer/tests/sample_filelist.txt", - feat_dim = 3, - context_len = 0, - buffer_capacity = 1000000, - async_load_data = False, - ), -) - -Settings( - algorithm = "sgd", - num_batches_per_send_parameter = 1, - num_batches_per_get_parameter = 1, - batch_size = 100, - learning_rate = 0.001, - learning_rate_decay_a = 1e-5, - learning_rate_decay_b = 0.5, -) +from paddle.trainer_config_helpers import * + 
+TrainData(ProtoData( + files = "dummy_list", + constant_slots = [1.0], + async_load_data = True)) + +TestData(SimpleData( + files = "trainer/tests/sample_filelist.txt", + feat_dim = 3, + context_len = 0, + buffer_capacity = 1000000, + async_load_data = False)) + +settings(batch_size = 100) + +data = data_layer(name='input', size=3) + +wt = data_layer(name='weight', size=1) + +fc1 = fc_layer(input=data, size=5, + bias_attr=True, + act=SigmoidActivation()) + +fc2 = fc_layer(input=data, size=12, + bias_attr=True, + param_attr=ParamAttr(name='sharew'), + act=LinearActivation()) + +fc3 = fc_layer(input=data, size=3, + bias_attr=True, + act=TanhActivation()) + +fc4 = fc_layer(input=data, size=5, + bias_attr=True, + layer_attr=ExtraAttr(drop_rate=0.5), + act=SquareActivation()) + +pool = img_pool_layer(input=fc2, + pool_size=2, + pool_size_y=3, + num_channels=1, + padding=1, + padding_y=2, + stride=2, + stride_y=3, + img_width=3, + pool_type=CudnnAvgPooling()) + +concat = concat_layer(input=[fc3, fc4]) + +with mixed_layer(size=3, act=SoftmaxActivation()) as output: + output += full_matrix_projection(input=fc1) + output += trans_full_matrix_projection(input=fc2, + param_attr=ParamAttr(name='sharew')) + output += full_matrix_projection(input=concat) + output += identity_projection(input=fc3) + +lbl = data_layer(name='label', size=1) + +cost = classification_cost(input=output, label=lbl, weight=wt, + layer_attr=ExtraAttr(device=-1)) + +nce = nce_layer(input=fc2, label=lbl, weight=wt, + num_classes=3, + neg_distribution=[0.1, 0.3, 0.6]) + +outputs(cost, nce) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 7699c90db716c..745e61b2eb0bc 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -50,6 +50,7 @@ 'slope_intercept_layer', 'trans_full_matrix_projection', 'linear_comb_layer', 'convex_comb_layer', 'ctc_layer', 'crf_layer', 'crf_decoding_layer', + 'nce_layer', 'cross_entropy_with_selfnorm', 'cross_entropy', 'multi_binary_label_cross_entropy', 'rank_cost', 'lambda_cost', 'huber_cost', @@ -115,6 +116,7 @@ class LayerType(object): CTC_LAYER = "ctc" CRF_LAYER = "crf" CRF_DECODING_LAYER = "crf_decoding" + NCE_LAYER = 'nce' RANK_COST = "rank-cost" LAMBDA_COST = "lambda_cost" @@ -168,7 +170,7 @@ class LayerOutput(object): :param activation: Layer Activation. :type activation: BaseActivation. :param parents: Layer's parents. - :type parents: list|tuple|collection.Sequence + :type parents: list|tuple|collections.Sequence """ def __init__(self, name, layer_type, parents=None, activation=None, @@ -1988,10 +1990,16 @@ def concat_layer(input, act=None, name=None, layer_attr=None): Concat all input vector into one huge vector. Inputs can be list of LayerOutput or list of projection. + The example usage is: + + .. code-block:: python + + concat = concat_layer(input=[layer1, layer2]) + :param name: Layer name. :type name: basestring :param input: input layers or projections - :type input: list|tuple|collection.Sequence + :type input: list|tuple|collections.Sequence :param act: Activation type. :type act: BaseActivation :param layer_attr: Extra Layer Attribute. 
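The hunk below adds the nce_layer wrapper for the NCELayer implementation shown earlier in this patch. As a rough, self-contained sketch of the quantity that noise-contrastive estimation optimizes (following the paper cited in NCELayer's comment), the following numpy snippet computes the per-example NCE loss for one true label and k sampled negative labels; the function and variable names are illustrative only and are not taken from the Paddle sources.

    import numpy as np

    def nce_example_loss(logits, true_label, neg_labels, noise_dist):
        # Each candidate class i is scored by sigmoid(logit_i - log(k * q_i)),
        # i.e. the probability that class i came from the data rather than
        # from the noise distribution q; k is the number of negative samples.
        k = len(neg_labels)

        def log_sigmoid(x):
            return -np.log1p(np.exp(-x))

        loss = -log_sigmoid(logits[true_label] - np.log(k * noise_dist[true_label]))
        for j in neg_labels:
            loss -= log_sigmoid(-(logits[j] - np.log(k * noise_dist[j])))
        return loss

    # Toy usage with the 3-class noise distribution used in test_config.conf above.
    noise = np.array([0.1, 0.3, 0.6])
    print(nce_example_loss(np.array([1.2, -0.3, 0.1]), 0, [2, 2], noise))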
@@ -3488,6 +3496,83 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None): parents.append(label) return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=size) +@wrap_bias_attr_default(has_bias=True) +@wrap_name_default() +@layer_support() +def nce_layer(input, label, num_classes, weight=None, + num_neg_samples=10, neg_distribution=None, + name=None, bias_attr=None, layer_attr=None): + """ + Noise-contrastive estimation. + Implements the method in the following paper: + A fast and simple algorithm for training neural probabilistic language models. + + The example usage is: + + .. code-block:: python + + cost = nce_layer(input=layer1, label=layer2, weight=layer3, + num_classes=3, neg_distribution=[0.1,0.3,0.6]) + + :param name: layer name + :type name: basestring + :param input: input layers. It could be a LayerOutput of list/tuple of LayerOutput. + :type input: LayerOutput|list|tuple|collections.Sequence + :param label: label layer + :type label: LayerOutput + :param weight: weight layer, can be None(default) + :type weight: LayerOutput + :param num_classes: number of classes. + :type num_classes: int + :param num_neg_samples: number of negative samples. Default is 10. + :type num_neg_samples: int + :param neg_distribution: The distribution for generating the random negative labels. + A uniform distribution will be used if not provided. + If not None, its length must be equal to num_classes. + :type neg_distribution: list|tuple|collections.Sequence|None + :param bias_attr: Bias parameter attribute. True if no bias. + :type bias_attr: ParameterAttribute|None|False + :param layer_attr: Extra Layer Attribute. + :type layer_attr: ExtraLayerAttribute + :return: layer name. + :rtype: LayerOutput + """ + if isinstance(input, LayerOutput): + input = [input] + assert isinstance(input, collections.Sequence) + assert isinstance(label, LayerOutput) + assert label.layer_type == LayerType.DATA + if neg_distribution is not None: + assert isinstance(neg_distribution, collections.Sequence) + assert len(neg_distribution) == num_classes + assert sum(neg_distribution) == 1 + + ipts_for_layer = [] + parents = [] + for each_input in input: + assert isinstance(each_input, LayerOutput) + ipts_for_layer.append(each_input.name) + parents.append(each_input) + ipts_for_layer.append(label.name) + parents.append(label) + + if weight is not None: + assert isinstance(weight, LayerOutput) + assert weight.layer_type == LayerType.DATA + ipts_for_layer.append(weight.name) + parents.append(weight) + + Layer( + name=name, + type=LayerType.NCE_LAYER, + num_classes=num_classes, + neg_sampling_dist=neg_distribution, + num_neg_samples=num_neg_samples, + inputs=ipts_for_layer, + bias=ParamAttr.to_bias(bias_attr), + **ExtraLayerAttribute.to_kwargs(layer_attr) + ) + return LayerOutput(name, LayerType.NCE_LAYER, parents=parents) """ following are cost Layers. 
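One practical note on the interface added above before moving to the next patch: neg_distribution must have length num_classes and sum exactly to 1, so when it is estimated from label frequencies it is easiest to use counts whose ratios are exact binary fractions, or simply to omit the argument and fall back to the uniform distribution. The sketch below is illustrative only; the label counts and layer sizes are made up.

    from collections import Counter
    from paddle.trainer_config_helpers import *

    # Hypothetical label stream; 2/8, 2/8 and 4/8 are exact in floating point,
    # so the helper's "sum(neg_distribution) == 1" assertion passes.
    observed_labels = [0, 0, 1, 1, 2, 2, 2, 2]
    counts = Counter(observed_labels)
    total = float(sum(counts.values()))
    neg_dist = [counts.get(i, 0) / total for i in range(3)]

    data = data_layer(name='input', size=300)
    lbl = data_layer(name='label', size=1)
    hidden = fc_layer(input=data, size=64, act=TanhActivation())
    cost = nce_layer(input=hidden, label=lbl, num_classes=3,
                     num_neg_samples=5, neg_distribution=neg_dist)
    outputs(cost)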
From c2d418dbfde2db19ec575bfb3aeb091b37fbc8ef Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 14 Oct 2016 09:35:51 +0800 Subject: [PATCH 022/180] Merge internal changes (#198) * fix DataProvider create function args bug Change-Id: I9e3a1c535c805bf30204a14aea8d5143ff534784 * remove PserverForPython.h which is not used Change-Id: I2b27f1f3c11a42766a92fc689f0f5f1f73ee1d70 * add internal document script Change-Id: Ia0fec79456caea0b271f9903cc13e8a3d32e0774 --- paddle/gserver/dataproviders/DataProvider.h | 3 +- paddle/internals/scripts/docs.sh | 20 ++++ paddle/pserver/PserverForPython.h | 116 -------------------- 3 files changed, 22 insertions(+), 117 deletions(-) create mode 100755 paddle/internals/scripts/docs.sh delete mode 100644 paddle/pserver/PserverForPython.h diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index 534491d70d546..c24546374abf3 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -308,7 +308,8 @@ class DataProvider { /** * @brief create only used for unittest. */ - inline static DataProvider* create(const DataConfig &config, bool useGpu) { + inline static DataProvider* create(const DataConfig &config, + bool useGpu = FLAGS_use_gpu) { return create(config, ModelConfig(), useGpu); } diff --git a/paddle/internals/scripts/docs.sh b/paddle/internals/scripts/docs.sh new file mode 100755 index 0000000000000..517405c120a47 --- /dev/null +++ b/paddle/internals/scripts/docs.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved + +cd `dirname $0` + +# Add set -e, cd to directory. +set -e +mkdir -p $PWD/../../../build +cd $PWD/../../../build + +# Compile Documentation only. +cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=ON +make paddle_docs paddle_docs_cn + +# remove old docs. mv new docs in deeplearning.baidu.com +scp -r doc/html paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com:/home/paddle_doc/www/doc_new +ssh paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com "cd ~/www/ && rm -r doc && mv doc_new doc" + +scp -r doc_cn/html paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com:/home/paddle_doc/www/doc_cn_new +ssh paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com "cd ~/www/ && rm -r doc_cn && mv doc_cn_new doc_cn" diff --git a/paddle/pserver/PserverForPython.h b/paddle/pserver/PserverForPython.h deleted file mode 100644 index 5bbeae8bd8b97..0000000000000 --- a/paddle/pserver/PserverForPython.h +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once -#include "paddle/pserver/ParameterClient.h" -#include "paddle/pserver/ParameterServer.h" -#include "paddle/parameter/Parameter.h" -#include - -namespace paddle { - -struct PyObjectDeleter { - void operator()(PyObject* obj) { - if (obj) { - Py_DECREF(obj); - } - } -}; - -class ParameterClientPy : public ParameterClient { -protected: - typedef std::unique_ptr PyObjectPtr; - - std::vector parameter_; - int initArgc_; - char** initArgv_; - -public: - ParameterClientPy(std::vector configs, int argc, - std::vector argv, bool useGpu) { - initArgc_ = argc; - initArgv_ = new char* [argc]; - for (int i = 0; i < argc; i++) { - initArgv_[i] = new char[argv[i].size()]; - strcpy(initArgv_[i], // NOLINT - argv[i].c_str()); // NOLINT TODO(yuyang18): use snprintf instead. - } - ParameterConfig pyConfig; - ParameterPtr param; - for (auto& config : configs) { - pyConfig.ParseFromString(config); - param.reset(new Parameter(pyConfig, useGpu)); - parameter_.push_back(param); - } - Py_Initialize(); - CHECK(Py_IsInitialized()); - } - - ~ParameterClientPy() { - delete initArgv_; - Py_Finalize(); - } - - Parameter getParameter(int idx) { return *(parameter_[idx].get()); } - - void initClientPy() { - initMain(initArgc_, initArgv_); - CHECK(init(parameter_)) << "Init Client Failed."; - } - - void setConfigPy(std::string config) { - OptimizationConfig optConfig; - optConfig.ParseFromString(config); - setConfig(optConfig); - } - - bool inStatusPy(int status) { return inStatus(PServerStatus(status)); } - - void setStatusPy(int status) { setStatus(PServerStatus(status)); } - - void waitForStatusPy(int status) { waitForStatus(PServerStatus(status)); } - - void sendParameterPy(int updateMode, int parameterType, int numSamples, - real cost, bool sendBackParameter) { - sendParameter(ParameterUpdateMode(updateMode), ParameterType(parameterType), - int64_t(numSamples), real(cost), sendBackParameter); - } - - template - std::string asyncCallPy(const char* serviceName, const char* funcName, - const std::string in) { - ProtoIn protoIn; - ProtoOut protoOut; - std::mutex waitLock; - std::string data; - protoIn.ParseFromString(in); - waitLock.lock(); - auto callback = [&](ProtoOut* pOut, bool isSuccessful) { - if (isSuccessful) { - pOut->SerializeToString(&data); - } else { - LOG(INFO) << "Async Talk Failed."; - } - waitLock.unlock(); - }; - - ubClient_.asyncCall(serviceName, funcName, protoIn, - &protoOut, callback); - waitLock.lock(); - protoOut.SerializeToString(&data); - return data; - } -}; - -} // namespace paddle From cebdb66768832bf05d6b5f35d7752375361e5f0a Mon Sep 17 00:00:00 2001 From: luotao1 Date: Fri, 14 Oct 2016 09:51:30 +0800 Subject: [PATCH 023/180] hierarchical rnn document, add new config example (#106) * hierarchical rnn document, add new config example * update inputs_type of label * add check for unsupported config * refine hierarchical document * refine doc title * update docs, fix paddle to PaddlePaddle * follow comments --- doc/ui/api/trainer_config_helpers/layers.rst | 6 + doc_cn/algorithm/rnn/hierarchical-layer.md | 66 +++++ doc_cn/algorithm/rnn/hierarchical-rnn.md | 267 ++++++++++++++++++ doc_cn/algorithm/rnn/rnn-tutorial.md | 96 +++++++ doc_cn/index.rst | 5 +- .../RecurrentGradientMachine.cpp | 29 +- paddle/gserver/layers/AverageLayer.cpp | 10 +- .../layers/SequenceLastInstanceLayer.cpp | 9 +- paddle/gserver/tests/sequenceGen.py | 8 +- paddle/gserver/tests/sequence_nest_rnn.conf | 5 +- .../tests/sequence_nest_rnn_multi_input.conf | 5 +- .../tests/test_RecurrentGradientMachine.cpp | 12 
+- 12 files changed, 480 insertions(+), 38 deletions(-) create mode 100644 doc_cn/algorithm/rnn/hierarchical-layer.md create mode 100644 doc_cn/algorithm/rnn/hierarchical-rnn.md create mode 100644 doc_cn/algorithm/rnn/rnn-tutorial.md diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index 5271262d20d55..55f5623b0faef 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -130,6 +130,12 @@ gru_step_layer Recurrent Layer Group ===================== +memory +------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: memory + :noindex: + recurrent_group --------------- .. automodule:: paddle.trainer_config_helpers.layers diff --git a/doc_cn/algorithm/rnn/hierarchical-layer.md b/doc_cn/algorithm/rnn/hierarchical-layer.md new file mode 100644 index 0000000000000..5282bbbcb82d0 --- /dev/null +++ b/doc_cn/algorithm/rnn/hierarchical-layer.md @@ -0,0 +1,66 @@ +# 支持双层序列作为输入的Layer + +## 概述 + +在自然语言处理任务中,序列是一种常见的数据类型。一个独立的词语,可以看作是一个非序列输入,或者,我们称之为一个0层的序列;由词语构成的句子,是一个单层序列;若干个句子构成一个段落,是一个双层的序列。 + +双层序列是一个嵌套的序列,它的每一个元素,又是一个单层的序列。这是一种非常灵活的数据组织方式,帮助我们构造一些复杂的输入信息。 + +我们可以按照如下层次定义非序列,单层序列,以及双层序列。 + ++ 0层序列:一个独立的元素,类型可以是PaddlePaddle支持的任意输入数据类型 ++ 单层序列:排成一列的多个元素,每个元素是一个0层序列,元素之间的顺序是重要的输入信息 ++ 双层序列:排成一列的多个元素,每个元素是一个单层序列,称之为双层序列的一个子序列(subseq),subseq的每个元素是一个0层序列 + + +在 PaddlePaddle中,下面这些Layer能够接受双层序列作为输入,完成相应的计算。 +## pooling_layer + +pooling_layer的使用示例如下,详细见配置API。 +```python +seq_pool = pooling_layer(input=layer, + pooling_type=AvgPooling(), + agg_level=AggregateLevel.EACH_SEQUENCE) +``` +- `pooling_type` 目前支持两种,分别是:MaxPooling()和AvgPooling()。 +- `agg_level=AggregateLevel.TIMESTEP`时(默认值): + - 作用:双层序列经过运算变成一个0层序列,或单层序列经过运算变成一个0层序列 + - 输入:一个双层序列,或一个单层序列 + - 输出:一个0层序列,即整个输入序列(单层或双层)的平均值(或最大值) +- `agg_level=AggregateLevel.EACH_SEQUENCE`时: + - 作用:一个双层序列经过运算变成一个单层序列 + - 输入:必须是一个双层序列 + - 输出:一个单层序列,序列的每个元素是原来双层序列每个subseq元素的平均值(或最大值) + +## last_seq 和 first_seq + +last_seq的使用示例如下(first_seq类似),详细见配置API。 +```python +last = last_seq(input=layer, + agg_level=AggregateLevel.EACH_SEQUENCE) +``` +- `agg_level=AggregateLevel.TIMESTEP`时(默认值): + - 作用:一个双层序列经过运算变成一个0层序列,或一个单层序列经过运算变成一个0层序列 + - 输入:一个双层序列或一个单层序列 + - 输出:一个0层序列,即整个输入序列(双层或者单层)最后一个,或第一个元素。 +- `agg_level=AggregateLevel.EACH_SEQUENCE`时: + - 作用:一个双层序列经过运算变成一个单层序列 + - 输入:必须是一个双层序列 + - 输出:一个单层序列,其中每个元素是双层序列中每个subseq最后一个(或第一个)元素。 + +## expand_layer + +expand_layer的使用示例如下,详细见配置API。 +```python +expand = expand_layer(input=layer1, + expand_as=layer2, + expand_level=ExpandLevel.FROM_TIMESTEP) +``` +- `expand_level=ExpandLevel.FROM_TIMESTEP`时(默认值): + - 作用:一个0层序列经过运算扩展成一个单层序列,或者一个双层序列 + - 输入:layer1必须是一个0层序列,是待扩展的数据;layer2可以是一个单层序列,或者是一个双层序列,提供扩展的长度信息 + - 输出:一个单层序列,或一个双层序列,输出序列的类型(双层序列,或单层序列)和序列中含有元素的数目同 layer2一致。若输出是单层序列,单层序列的每个元素(0层序列),都是对layer1元素的拷贝;若输出是双层序列,双层序列每个subseq中每个元素(0层序列),都是对layer1元素的拷贝 +- `expand_level=ExpandLevel.FROM_SEQUENCE`时: + - 作用:一个单层序列经过运算扩展成一个双层序列 + - 输入:layer1必须是一个单层序列,是待扩展的数据;layer2必须是一个双层序列,提供扩展的长度信息 + - 输出:一个双层序列,序列中含有元素的数目同layer2一致。要求单层序列含有元素的数目(0层序列),和双层序列含有subseq 的数目一致。单层序列第i个元素(0层序列),被扩展为一个单层序列,构成了输出双层序列的第i个subseq。 \ No newline at end of file diff --git a/doc_cn/algorithm/rnn/hierarchical-rnn.md b/doc_cn/algorithm/rnn/hierarchical-rnn.md new file mode 100644 index 0000000000000..979fe13e2ecbd --- /dev/null +++ b/doc_cn/algorithm/rnn/hierarchical-rnn.md @@ -0,0 +1,267 @@ +# 双层RNN配置与示例 + +我们在`paddle/gserver/tests/test_RecurrentGradientMachine`单测中,通过多组语义相同的单双层RNN配置,讲解如何使用双层RNN。 + +## 示例1:双进双出,subseq间无memory + 
+配置:单层RNN(`sequence_layer_group`)和双层RNN(`sequence_nest_layer_group`),语义完全相同。 + +### 读取双层序列的方法 + +首先,我们看一下单双层序列的不同数据组织形式(您也可以采用别的组织形式): + +- 单层序列的数据(`Sequence/tour_train_wdseg`)如下,一共有10个样本。每个样本由两部分组成,一个label(此处都为2)和一个已经分词后的句子。 + +```text +2 酒店 有 很 舒适 的 床垫 子 , 床上用品 也 应该 是 一人 一 换 , 感觉 很 利落 对 卫生 很 放心 呀 。 +2 很 温馨 , 也 挺 干净 的 * 地段 不错 , 出来 就 有 全家 , 离 地铁站 也 近 , 交通 很方便 * 就是 都 不 给 刷牙 的 杯子 啊 , 就 第一天 给 了 一次性杯子 * +2 位置 方便 , 强烈推荐 , 十一 出去玩 的 时候 选 的 , 对面 就是 华润万家 , 周围 吃饭 的 也 不少 。 +2 交通便利 , 吃 很 便利 , 乾 浄 、 安静 , 商务 房 有 电脑 、 上网 快 , 价格 可以 , 就 早餐 不 好吃 。 整体 是 不错 的 。 適 合 出差 來 住 。 +2 本来 准备 住 两 晚 , 第 2 天 一早 居然 停电 , 且 无 通知 , 只有 口头 道歉 。 总体来说 性价比 尚可 , 房间 较 新 , 还是 推荐 . +2 这个 酒店 去过 很多 次 了 , 选择 的 主要原因 是 离 客户 最 便宜 相对 又 近 的 酒店 +2 挺好 的 汉庭 , 前台 服务 很 热情 , 卫生 很 整洁 , 房间 安静 , 水温 适中 , 挺好 ! +2 HowardJohnson 的 品质 , 服务 相当 好 的 一 家 五星级 。 房间 不错 、 泳池 不错 、 楼层 安排 很 合理 。 还有 就是 地理位置 , 简直 一 流 。 就 在 天一阁 、 月湖 旁边 , 离 天一广场 也 不远 。 下次 来 宁波 还会 住 。 +2 酒店 很干净 , 很安静 , 很 温馨 , 服务员 服务 好 , 各方面 都 不错 * +2 挺好 的 , 就是 没 窗户 , 不过 对 得 起 这 价格 +``` + +- 双层序列的数据(`Sequence/tour_train_wdseg.nest`)如下,一共有4个样本。样本间用空行分开,代表不同的双层序列,序列数据和上面的完全一样。每个样本的子句数分别为2,3,2,3。 + +```text +2 酒店 有 很 舒适 的 床垫 子 , 床上用品 也 应该 是 一人 一 换 , 感觉 很 利落 对 卫生 很 放心 呀 。 +2 很 温馨 , 也 挺 干净 的 * 地段 不错 , 出来 就 有 全家 , 离 地铁站 也 近 , 交通 很方便 * 就是 都 不 给 刷牙 的 杯子 啊 , 就 第一天 给 了 一次性杯子 * + +2 位置 方便 , 强烈推荐 , 十一 出去玩 的 时候 选 的 , 对面 就是 华润万家 , 周围 吃饭 的 也 不少 。 +2 交通便利 , 吃 很 便利 , 乾 浄 、 安静 , 商务 房 有 电脑 、 上网 快 , 价格 可以 , 就 早餐 不 好吃 。 整体 是 不错 的 。 適 合 出差 來 住 。 +2 本来 准备 住 两 晚 , 第 2 天 一早 居然 停电 , 且 无 通知 , 只有 口头 道歉 。 总体来说 性价比 尚可 , 房间 较 新 , 还是 推荐 . + +2 这个 酒店 去过 很多 次 了 , 选择 的 主要原因 是 离 客户 最 便宜 相对 又 近 的 酒店 +2 挺好 的 汉庭 , 前台 服务 很 热情 , 卫生 很 整洁 , 房间 安静 , 水温 适中 , 挺好 ! + +2 HowardJohnson 的 品质 , 服务 相当 好 的 一 家 五星级 。 房间 不错 、 泳池 不错 、 楼层 安排 很 合理 。 还有 就是 地理位置 , 简直 一 流 。 就 在 天一阁 、 月湖 旁边 , 离 天一广场 也 不远 。 下次 来 宁波 还会 住 。 +2 酒店 很干净 , 很安静 , 很 温馨 , 服务员 服务 好 , 各方面 都 不错 * +2 挺好 的 , 就是 没 窗户 , 不过 对 得 起 这 价格 +``` + +其次,我们看一下单双层序列的不同dataprovider(见`sequenceGen.py`): + +- 单层序列的dataprovider如下: + - word_slot是integer_value_sequence类型,代表单层序列。 + - label是integer_value类型,代表一个向量。 + +```python +def hook(settings, dict_file, **kwargs): + settings.word_dict = dict_file + settings.input_types = [integer_value_sequence(len(settings.word_dict)), + integer_value(3)] + +@provider(init_hook=hook) +def process(settings, file_name): + with open(file_name, 'r') as fdata: + for line in fdata: + label, comment = line.strip().split('\t') + label = int(''.join(label.split())) + words = comment.split() + word_slot = [settings.word_dict[w] for w in words if w in settings.word_dict] + yield word_slot, label +``` + +- 双层序列的dataprovider如下: + - word_slot是integer_value_sub_sequence类型,代表双层序列。 + - label是integer_value_sequence类型,代表单层序列,即一个子句一个label。注意:也可以为integer_value类型,代表一个向量,即一个句子一个label。通常根据任务需求进行不同设置。 + - 关于dataprovider中input_types的详细用法,参见PyDataProvider2。 + +```python +def hook2(settings, dict_file, **kwargs): + settings.word_dict = dict_file + settings.input_types = [integer_value_sub_sequence(len(settings.word_dict)), + integer_value_sequence(3)] + +@provider(init_hook=hook2) +def process2(settings, file_name): + with open(file_name) as fdata: + label_list = [] + word_slot_list = [] + for line in fdata: + if (len(line)) > 1: + label,comment = line.strip().split('\t') + label = int(''.join(label.split())) + words = comment.split() + word_slot = [settings.word_dict[w] for w in words if w in settings.word_dict] + label_list.append(label) + word_slot_list.append(word_slot) + else: + yield word_slot_list, label_list + label_list = [] + word_slot_list = [] +``` + +### 模型中的配置 + 
+首先,我们看一下单层序列的配置(见`sequence_layer_group.conf`)。注意:batchsize=5表示一次过5句单层序列,因此2个batch就可以完成1个pass。 + +```python +settings(batch_size=5) + +data = data_layer(name="word", size=dict_dim) + +emb = embedding_layer(input=data, size=word_dim) + +# (lstm_input + lstm) is equal to lstmemory +with mixed_layer(size=hidden_dim*4) as lstm_input: + lstm_input += full_matrix_projection(input=emb) + +lstm = lstmemory_group(input=lstm_input, + size=hidden_dim, + act=TanhActivation(), + gate_act=SigmoidActivation(), + state_act=TanhActivation(), + lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) + +lstm_last = last_seq(input=lstm) + +with mixed_layer(size=label_dim, + act=SoftmaxActivation(), + bias_attr=True) as output: + output += full_matrix_projection(input=lstm_last) + +outputs(classification_cost(input=output, label=data_layer(name="label", size=1))) + +``` +其次,我们看一下语义相同的双层序列配置(见`sequence_nest_layer_group.conf`),并对其详细分析: + +- batchsize=2表示一次过2句双层序列。但从上面的数据格式可知,2句双层序列和5句单层序列的数据完全一样。 +- data_layer和embedding_layer不关心数据是否是序列格式,因此两个配置在这两层上的输出是一样的。 +- lstmemory: + - 单层序列过了一个mixed_layer和lstmemory_group。 + - 双层序列在同样的mixed_layer和lstmemory_group外,直接加了一层group。由于这个外层group里面没有memory,表示subseq间不存在联系,即起到的作用仅仅是把双层seq拆成单层,因此双层序列过完lstmemory的输出和单层的一样。 +- last_seq: + - 单层序列直接取了最后一个元素 + - 双层序列首先(last_seq层)取了每个subseq的最后一个元素,将其拼接成一个新的单层序列;接着(expand_layer层)将其扩展成一个新的双层序列,其中第i个subseq中的所有向量均为输入的单层序列中的第i个向量;最后(average_layer层)取了每个subseq的平均值。 + - 分析得出:第一个last_seq后,每个subseq的最后一个元素就等于单层序列的最后一个元素,而expand_layer和average_layer后,依然保持每个subseq最后一个元素的值不变(这两层仅是为了展示它们的用法,实际中并不需要)。因此单双层序列的输出是一样旳。 + +```python +settings(batch_size=2) + +data = data_layer(name="word", size=dict_dim) + +emb_group = embedding_layer(input=data, size=word_dim) + +# (lstm_input + lstm) is equal to lstmemory +def lstm_group(lstm_group_input): + with mixed_layer(size=hidden_dim*4) as group_input: + group_input += full_matrix_projection(input=lstm_group_input) + + lstm_output = lstmemory_group(input=group_input, + name="lstm_group", + size=hidden_dim, + act=TanhActivation(), + gate_act=SigmoidActivation(), + state_act=TanhActivation(), + lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) + return lstm_output + +lstm_nest_group = recurrent_group(input=SubsequenceInput(emb_group), + step=lstm_group, + name="lstm_nest_group") +# hasSubseq ->(seqlastins) seq +lstm_last = last_seq(input=lstm_nest_group, agg_level=AggregateLevel.EACH_SEQUENCE) + +# seq ->(expand) hasSubseq +lstm_expand = expand_layer(input=lstm_last, expand_as=emb_group, expand_level=ExpandLevel.FROM_SEQUENCE) + +# hasSubseq ->(average) seq +lstm_average = pooling_layer(input=lstm_expand, + pooling_type=AvgPooling(), + agg_level=AggregateLevel.EACH_SEQUENCE) + +with mixed_layer(size=label_dim, + act=SoftmaxActivation(), + bias_attr=True) as output: + output += full_matrix_projection(input=lstm_average) + +outputs(classification_cost(input=output, label=data_layer(name="label", size=1))) +``` +## 示例2:双进双出,subseq间有memory + +配置:单层RNN(`sequence_rnn.conf`),双层RNN(`sequence_nest_rnn.conf`和`sequence_nest_rnn_readonly_memory.conf`),语义完全相同。 + +### 读取双层序列的方法 + +我们看一下单双层序列的不同数据组织形式和dataprovider(见`rnn_data_provider.py`) +```python +data = [ + [[[1, 3, 2], [4, 5, 2]], 0], + [[[0, 2], [2, 5], [0, 1, 2]], 1], +] + +@provider(input_types=[integer_value_sub_sequence(10), + integer_value(3)]) +def process_subseq(settings, file_name): + for d in data: + yield d + +@provider(input_types=[integer_value_sequence(10), + integer_value(3)]) +def process_seq(settings, file_name): + for d in data: + seq = [] 
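+        # 以下几行为假定的补全(原示例到 `seq = []` 为止):把每个样本的各个
+        # 子序列拼接成一个单层序列,与下文描述的展平后数据保持一致。
+        for subseq in d[0]:
+            seq += subseq
+        yield seq, d[1]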
+``` +- 单层序列:有两句,分别为[1,3,2,4,5,2]和[0,2,2,5,0,1,2]。 +- 双层序列:有两句,分别为[[1,3,2],[4,5,2]](2个子句)和[[0,2],[2,5],[0,1,2]](3个子句)。 +- 单双层序列的label都分别是0和1 + +### 模型中的配置 + +我们选取单双层序列配置中的不同部分,来对比分析两者语义相同的原因。 + +- 单层序列:过了一个很简单的recurrent_group。每一个时间步,当前的输入y和上一个时间步的输出rnn_state做了一个全链接。 + +```python +def step(y): + mem = memory(name="rnn_state", size=hidden_dim) + return fc_layer(input=[y, mem], + size=hidden_dim, + act=TanhActivation(), + bias_attr=True, + name="rnn_state") + +out = recurrent_group(step=step, input=emb) +``` +- 双层序列,外层memory是一个元素: + - 内层inner_step的recurrent_group和单层序列的几乎一样。除了boot_layer=outer_mem,表示将外层的outer_mem作为内层memory的初始状态。外层outer_step中,outer_mem是一个子句的最后一个向量,即整个双层group是将前一个子句的最后一个向量,作为下一个子句memory的初始状态。 + - 从输入数据上看,单双层序列的句子是一样的,只是双层序列将其又做了子序列划分。因此双层序列的配置中,必须将前一个子句的最后一个元素,作为boot_layer传给下一个子句的memory,才能保证和单层序列的配置中“每一个时间步都用了上一个时间步的输出结果”一致。 + +```python +def outer_step(x): + outer_mem = memory(name="outer_rnn_state", size=hidden_dim) + def inner_step(y): + inner_mem = memory(name="inner_rnn_state", + size=hidden_dim, + boot_layer=outer_mem) + return fc_layer(input=[y, inner_mem], + size=hidden_dim, + act=TanhActivation(), + bias_attr=True, + name="inner_rnn_state") + + inner_rnn_output = recurrent_group( + step=inner_step, + input=x) + last = last_seq(input=inner_rnn_output, name="outer_rnn_state") + + return inner_rnn_output + +out = recurrent_group(step=outer_step, input=SubsequenceInput(emb)) +``` +- 双层序列,外层memory是单层序列: + - 由于外层每个时间步返回的是一个子句,这些子句的长度往往不等长。因此当外层有is_seq=True的memory时,内层是**无法直接使用**它的,即内层memory的boot_layer不能链接外层的这个memory。 + - 如果内层memory想**间接使用**这个外层memory,只能通过`pooling_layer`、`last_seq`或`first_seq`这三个layer将它先变成一个元素。但这种情况下,外层memory必须有boot_layer,否则在第0个时间步时,由于外层memory没有任何seq信息,因此上述三个layer的前向会报出“**Check failed: input.sequenceStartPositions**”的错误。 + +## 示例3:双进双出,输入不等长 + +TBD + +## 示例4:beam_search的生成 + +TBD \ No newline at end of file diff --git a/doc_cn/algorithm/rnn/rnn-tutorial.md b/doc_cn/algorithm/rnn/rnn-tutorial.md new file mode 100644 index 0000000000000..7a553054c8039 --- /dev/null +++ b/doc_cn/algorithm/rnn/rnn-tutorial.md @@ -0,0 +1,96 @@ +# Recurrent Group教程 + +## 概述 + +序列数据是自然语言处理任务面对的一种主要输入数据类型。 + +一句话是由词语构成的序列,多句话进一步构成了段落。因此,段落可以看作是一个嵌套的双层的序列,这个序列的每个元素又是一个序列。 + +双层序列是PaddlePaddle支持的一种非常灵活的数据组织方式,帮助我们更好地描述段落、多轮对话等更为复杂的语言数据。基于双层序列输入,我们可以设计搭建一个灵活的、层次化的RNN,分别从词语和句子级别编码输入数据,同时也能够引入更加复杂的记忆机制,更好地完成一些复杂的语言理解任务。 + +在PaddlePaddle中,`recurrent_group`是一种任意复杂的RNN单元,用户只需定义RNN在一个时间步内完成的计算,PaddlePaddle负责完成信息和误差在时间序列上的传播。 + +更进一步,`recurrent_group`同样可以扩展到双层序列的处理上。通过两个嵌套的`recurrent_group`分别定义子句级别和词语级别上需要完成的运算,最终实现一个层次化的复杂RNN。 + +目前,在PaddlePaddle中,能够对双向序列进行处理的有`recurrent_group`和部分Layer,具体可参考文档:支持双层序列作为输入的Layer。 + +## 相关概念 + +### 基本原理 +`recurrent_group` 是PaddlePaddle支持的一种任意复杂的RNN单元。使用者只需要关注于设计RNN在一个时间步之内完成的计算,PaddlePaddle负责完成信息和梯度在时间序列上的传播。 + +PaddlePaddle中,`recurrent_group`的一个简单调用如下: + +``` python +recurrent_group(step, input, reverse) +``` +- step:一个可调用的函数,定义一个时间步之内RNN单元完成的计算 +- input:输入,必须是一个单层序列,或者一个双层序列 +- reverse:是否以逆序处理输入序列 + +使用`recurrent_group`的核心是设计step函数的计算逻辑。step函数内部可以自由组合PaddlePaddle支持的各种layer,完成任意的运算逻辑。`recurrent_group` 的输入(即input)会成为step函数的输入,由于step 函数只关注于RNN一个时间步之内的计算,在这里`recurrent_group`替我们完成了原始输入数据的拆分。 + +### 输入 +`recurrent_group`处理的输入序列主要分为以下三种类型: + +- **数据输入**:一个双层序列进入`recurrent_group`会被拆解为一个单层序列,一个单层序列进入`recurrent_group`会被拆解为非序列,然后交给step函数,这一过程对用户是完全透明的。可以有以下两种:1)通过data_layer拿到的用户输入;2)其它layer的输出。 + +- **只读Memory输入**:`StaticInput` 定义了一个只读的Memory,由`StaticInput`指定的输入不会被`recurrent_group`拆解,`recurrent_group` 循环展开的每个时间步总是能够引用所有输入,可以是一个非序列,或者一个单层序列。 + +- 
**序列生成任务的输入**:`GeneratedInput`只用于在序列生成任务中指定输入数据。 + +### 输入示例 + +序列生成任务大多遵循encoder-decoer架构,encoder和decoder可以是能够处理序列的任意神经网络单元,而RNN是最流行的选择。 + +给定encoder输出和当前词,decoder每次预测产生下一个最可能的词语。在这种结构中,decoder接受两个输入: + +- 要生成的目标序列:是decoder的数据输入,也是decoder循环展开的依据,`recurrent_group`会对这类输入进行拆解。 + +- encoder输出,可以是一个非序列,或者一个单层序列:是一个unbounded memory,decoder循环展开的每一个时间步会引用全部结果,不应该被拆解,这种类型的输入必须通过`StaticInput`指定。关于Unbounded Memory的更多讨论请参考论文 [Neural Turning Machine](https://arxiv.org/abs/1410.5401)。 + +在序列生成任务中,decoder RNN总是引用上一时刻预测出的词的词向量,作为当前时刻输入。`GeneratedInput`自动完成这一过程。 + +### 输出 +`step`函数必须返回一个或多个Layer的输出,这个Layer的输出会作为整个`recurrent_group` 最终的输出结果。在输出的过程中,`recurrent_group` 会将每个时间步的输出拼接,这个过程对用户也是透明的。 + +### memory +memory只能在`recurrent_group`中定义和使用。memory不能独立存在,必须指向一个PaddlePaddle定义的Layer。引用memory得到这layer上一时刻输出,因此,可以将memory理解为一个时延操作。 + +可以显示地指定一个layer的输出用于初始化memory。不指定时,memory默认初始化为0。 + +## 双层RNN介绍 +`recurrent_group`帮助我们完成对输入序列的拆分,对输出的合并,以及计算逻辑在序列上的循环展开。 + +利用这种特性,两个嵌套的`recurrent_group`能够处理双层序列,实现词语和句子两个级别的双层RNN结构。 + +- 单层(word-level)RNN:每个状态(state)对应一个词(word)。 +- 双层(sequence-level)RNN:一个双层RNN由多个单层RNN组成,每个单层RNN(即双层RNN的每个状态)对应一个子句(subseq)。 + +为了描述方便,下文以NLP任务为例,将含有子句(subseq)的段落定义为一个双层序列,将含有词语的句子定义为一个单层序列,那么0层序列即为一个词语。 + +## 双层RNN的使用 + +### 训练流程的使用方法 +使用 `recurrent_group`需要遵循以下约定: + +- **单进单出**:输入和输出都是单层序列。 + - 如果有多个输入,不同输入序列含有的词语数必须严格相等。 + - 输出一个单层序列,输出序列的词语数和输入序列一致。 + - memory:在step函数中定义 memory指向一个layer,通过引用memory得到这个layer上一个时刻输出,形成recurrent 连接。memory的is_seq参数必须为false。如果没有定义memory,每个时间步之内的运算是独立的。 + - boot_layer:memory的初始状态,默认初始状为0,memory的is_seq参数必须为false。 + +- **双进双出**:输入和输出都是双层序列。 + - 如果有多个输入序列,不同输入含有的子句(subseq)数必须严格相等,但子句含有的词语数可以不相等。 + - 输出一个双层序列,子句(subseq)数、子句的单词数和指定的一个输入序列一致,默认为第一个输入。 + - memory:在step函数中定义memory,指向一个layer,通过引用memory得到这个layer上一个时刻的输出,形成recurrent连接。定义在外层`recurrent_group` step函数中的memory,能够记录上一个subseq 的状态,可以是一个单层序列(只作为read-only memory),也可以是一个词语。如果没有定义memory,那么 subseq 之间的运算是独立的。 + - boot_layer:memory 初始状态,可以是一个单层序列(只作为read-only memory)或一个向量。默认不设置,即初始状态为0。 + +- **双进单出**:目前还未支持,会报错"In hierachical RNN, all out links should be from sequences now"。 + + +### 生成流程的使用方法 +使用`beam_search`需要遵循以下约定: + +- 单层RNN:从一个word生成下一个word。 +- 双层RNN:即把单层RNN生成后的subseq给拼接成一个新的双层seq。从语义上看,也不存在一个subseq直接生成下一个subseq的情况。 \ No newline at end of file diff --git a/doc_cn/index.rst b/doc_cn/index.rst index 6cf5588b5b34f..1a4908be14684 100644 --- a/doc_cn/index.rst +++ b/doc_cn/index.rst @@ -16,4 +16,7 @@ PaddlePaddle文档 算法教程 -------- -* `RNN配置 <../doc/algorithm/rnn/rnn.html>`_ +* `Recurrent Group教程 `_ +* `单层RNN示例 <../doc/algorithm/rnn/rnn.html>`_ +* `双层RNN示例 `_ +* `支持双层序列作为输入的Layer `_ diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp index fc38bca3c403b..340cd1b9f8e92 100644 --- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp @@ -544,6 +544,12 @@ void RecurrentGradientMachine::forward(const std::vector& inArgs, const std::vector inArgs; std::vector outArgs; frames_[i]->forward(inArgs, &outArgs, passType); + if (hasSubseq) { + for (auto& outFrameLine : outFrameLines_) { + CHECK(outFrameLine.frames[i]->getOutput().sequenceStartPositions) + << "In hierachical RNN, all out links should be from sequences."; + } + } } if (evaluator_ && passType == PASS_TEST) { this->eval(evaluator_.get()); @@ -635,16 +641,15 @@ void RecurrentGradientMachine::createInFrameInfo(int inlinkId, std::vector sequenceStartPositions; const int* subSequenceStartPositions = nullptr; - if (hasSubseq) { // 
for sequenceScatterAgentLayer - subSequenceStartPositions = - input.subSequenceStartPositions->getData(false); + if (hasSubseq) { // for sequenceScatterAgentLayer + subSequenceStartPositions = input.subSequenceStartPositions->getData(false); inlinkInfo->seqStartPosIndex.clear(); inlinkInfo->seqStartPosIndex.push_back(0); // first seqStartPosIndex = 0 } // maxSequenceLength_: max topLevelLength in allsamples for (int i = 0; i < maxSequenceLength_; ++i) { if (hasSubseq) { - sequenceStartPositions.push_back(0); // first element = 0 + sequenceStartPositions.push_back(0); // first element = 0 } int numSeqs = 0; for (size_t j = 0; j < numSequences; ++j) { @@ -676,9 +681,9 @@ void RecurrentGradientMachine::createInFrameInfo(int inlinkId, } if (hasSubseq) { // inFrameLine create sequenceStartPositions one time - CHECK_EQ(sequenceStartPositions.size(), - static_cast(maxSequenceLength_ + - input.getNumSubSequences())); + CHECK_EQ( + sequenceStartPositions.size(), + static_cast(maxSequenceLength_ + input.getNumSubSequences())); CHECK_EQ(inlinkInfo->seqStartPosIndex.size(), static_cast(maxSequenceLength_ + 1)); createSeqPos(sequenceStartPositions, &inlinkInfo->sequenceStartPositions); @@ -1102,10 +1107,12 @@ size_t RecurrentGradientMachine::beamShrink(std::vector& newPaths, newPaths.end(), Path::greaterPath); newPaths.resize(totalExpandCount + minNewPathSize); - real minPathLogProb = std::min_element(newPaths.end() - minNewPathSize, - newPaths.end())->logProb; - real maxPathLogProb = std::max_element(newPaths.end() - minNewPathSize, - newPaths.end())->logProb; + real minPathLogProb = + std::min_element(newPaths.end() - minNewPathSize, newPaths.end()) + ->logProb; + real maxPathLogProb = + std::max_element(newPaths.end() - minNewPathSize, newPaths.end()) + ->logProb; // Remove the already formed paths that are relatively short finalPaths_[seqId].erase( diff --git a/paddle/gserver/layers/AverageLayer.cpp b/paddle/gserver/layers/AverageLayer.cpp index 374117b7659bb..6e52217de4e63 100644 --- a/paddle/gserver/layers/AverageLayer.cpp +++ b/paddle/gserver/layers/AverageLayer.cpp @@ -64,6 +64,11 @@ void AverageLayer::forward(PassType passType) { size_t dim = getSize(); const Argument& input = getInput(0); + CHECK(input.sequenceStartPositions); + if (type_) { + CHECK(input.subSequenceStartPositions) + << "when trans_type = seq, input must hasSubseq"; + } int64_t newBatchSize = type_ ? input.getNumSubSequences() : input.getNumSequences(); ICpuGpuVectorPtr startPositions = @@ -75,11 +80,6 @@ void AverageLayer::forward(PassType passType) { // check CHECK_EQ(numSequences, (size_t)newBatchSize); CHECK_EQ(starts[numSequences], input.getBatchSize()); - if (type_) { - // when trans_type = seq, input must hasSubseq - CHECK_EQ(input.hasSubseq(), 1UL); - } - CHECK_EQ(dim, input.value->getWidth()); resetOutput(newBatchSize, dim); diff --git a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp index 12831e3668802..f4d26ba21bed6 100644 --- a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp +++ b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp @@ -91,6 +91,11 @@ void SequenceLastInstanceLayer::forward(PassType passType) { const Argument& input = getInput(0); // check + CHECK(input.sequenceStartPositions); + if (type_) { + CHECK(input.subSequenceStartPositions) + << "when trans_type = seq, input must hasSubseq"; + } auto startPositions = type_ ? 
input.subSequenceStartPositions->getVector(false) : input.sequenceStartPositions->getVector(false); @@ -98,10 +103,6 @@ void SequenceLastInstanceLayer::forward(PassType passType) { CHECK_EQ(dim, input.value->getWidth()); CHECK_EQ(startPositions->getData()[height], input.getBatchSize()); CHECK_EQ(height, startPositions->getSize() - 1); - if (type_) { - // when trans_type = seq, input must hasSubseq - CHECK_EQ(input.hasSubseq(), 1UL); - } reserveOutput(height, dim); const int* starts = startPositions->getData(); diff --git a/paddle/gserver/tests/sequenceGen.py b/paddle/gserver/tests/sequenceGen.py index cbed1f15fc415..b166e778d7a33 100644 --- a/paddle/gserver/tests/sequenceGen.py +++ b/paddle/gserver/tests/sequenceGen.py @@ -21,7 +21,7 @@ def hook(settings, dict_file, **kwargs): settings.word_dict = dict_file settings.input_types = [integer_value_sequence(len(settings.word_dict)), - integer_value_sequence(3)] + integer_value(3)] settings.logger.info('dict len : %d' % (len(settings.word_dict))) @@ -34,14 +34,14 @@ def process(settings, file_name): words = comment.split() word_slot = [settings.word_dict[w] for w in words if w in settings.word_dict] - yield word_slot, [label] + yield word_slot, label ## for hierarchical sequence network def hook2(settings, dict_file, **kwargs): settings.word_dict = dict_file settings.input_types = [integer_value_sub_sequence(len(settings.word_dict)), - integer_value_sub_sequence(3)] + integer_value_sequence(3)] settings.logger.info('dict len : %d' % (len(settings.word_dict))) @@ -57,7 +57,7 @@ def process2(settings, file_name): words = comment.split() word_slot = [settings.word_dict[w] for w in words if w in settings.word_dict] - label_list.append([label]) + label_list.append(label) word_slot_list.append(word_slot) else: yield word_slot_list, label_list diff --git a/paddle/gserver/tests/sequence_nest_rnn.conf b/paddle/gserver/tests/sequence_nest_rnn.conf index 62b8c5d072d7b..93b08eb2f8746 100644 --- a/paddle/gserver/tests/sequence_nest_rnn.conf +++ b/paddle/gserver/tests/sequence_nest_rnn.conf @@ -56,9 +56,8 @@ def outer_step(x): last = last_seq(input=inner_rnn_output, name="outer_rnn_state") # "return last" should also work. But currently RecurrentGradientMachine - # does not handle it correctly. Current implementation requires that - # all the out links are from sequences. However, it does not report error - # when the out links are not sequences. + # does not handle it, and will report error: In hierachical RNN, all out + # links should be from sequences now. return inner_rnn_output out = recurrent_group( diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf b/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf index e01b3f8e7aa5c..e8222cef525a8 100644 --- a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf +++ b/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf @@ -57,9 +57,8 @@ def outer_step(wid, x): last = last_seq(input=inner_rnn_output, name="outer_rnn_state") # "return last" should also work. But currently RecurrentGradientMachine - # does not handle it correctly. Current implementation requires that - # all the out links are from sequences. However, it does not report error - # when the out links are not sequences. + # does not handle it, and will report error: In hierachical RNN, all out + # links should be from sequences now. 
return inner_rnn_output out = recurrent_group( diff --git a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp index ae7f617371ca5..d104db3e5b32d 100644 --- a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp +++ b/paddle/gserver/tests/test_RecurrentGradientMachine.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include #include #include @@ -24,7 +23,7 @@ limitations under the License. */ P_DECLARE_int32(seed); using namespace paddle; // NOLINT -using namespace std; // NOLINT +using namespace std; // NOLINT class TrainerForTest : public paddle::Trainer { public: void startTrain() { @@ -44,11 +43,10 @@ class TrainerForTest : public paddle::Trainer { */ size_t getTotalParameterSize() const { auto p = const_cast(this); - auto & params = p->getGradientMachine()->getParameters(); - return std::accumulate(params.begin(), params.end(), 0UL, - [](size_t a, const ParameterPtr& p){ - return a+p->getSize(); - }); + auto& params = p->getGradientMachine()->getParameters(); + return std::accumulate( + params.begin(), params.end(), 0UL, + [](size_t a, const ParameterPtr& p) { return a + p->getSize(); }); } }; From 91df6062804093743e52e356824e503339c7f43e Mon Sep 17 00:00:00 2001 From: luotao1 Date: Fri, 14 Oct 2016 16:02:24 +0800 Subject: [PATCH 024/180] remove some copyfrom in AgentLayer and ExpandLayer, fix warning in seq2seq config (#183) --- demo/seqToseq/seqToseq_net.py | 8 +++--- paddle/cuda/include/hl_sequence.h | 2 +- paddle/cuda/include/stub/hl_sequence_stub.h | 2 +- paddle/cuda/src/hl_cuda_sequence.cu | 4 +-- paddle/gserver/layers/AgentLayer.cpp | 29 ++++++++----------- paddle/gserver/layers/AgentLayer.h | 6 +--- paddle/gserver/layers/ExpandLayer.cpp | 31 +++++++-------------- paddle/gserver/layers/ExpandLayer.h | 7 +---- paddle/math/Matrix.cpp | 8 +++--- paddle/math/Matrix.h | 6 ++-- 10 files changed, 39 insertions(+), 64 deletions(-) diff --git a/demo/seqToseq/seqToseq_net.py b/demo/seqToseq/seqToseq_net.py index 2b0c3f34648b0..edd6ad3f739b6 100644 --- a/demo/seqToseq/seqToseq_net.py +++ b/demo/seqToseq/seqToseq_net.py @@ -96,12 +96,12 @@ def gru_encoder_decoder(data_conf, encoded_vector = concat_layer(input=[src_forward, src_backward]) with mixed_layer(size=decoder_size) as encoded_proj: - encoded_proj += full_matrix_projection(encoded_vector) + encoded_proj += full_matrix_projection(input=encoded_vector) backward_first = first_seq(input=src_backward) with mixed_layer(size=decoder_size, act=TanhActivation(), ) as decoder_boot: - decoder_boot += full_matrix_projection(backward_first) + decoder_boot += full_matrix_projection(input=backward_first) def gru_decoder_with_attention(enc_vec, enc_proj, current_word): decoder_mem = memory(name='gru_decoder', @@ -113,8 +113,8 @@ def gru_decoder_with_attention(enc_vec, enc_proj, current_word): decoder_state=decoder_mem, ) with mixed_layer(size=decoder_size * 3) as decoder_inputs: - decoder_inputs += full_matrix_projection(context) - decoder_inputs += full_matrix_projection(current_word) + decoder_inputs += full_matrix_projection(input=context) + decoder_inputs += full_matrix_projection(input=current_word) gru_step = gru_step_layer(name='gru_decoder', input=decoder_inputs, diff --git a/paddle/cuda/include/hl_sequence.h b/paddle/cuda/include/hl_sequence.h index 828c21beb2fbd..46d86b2982f06 100644 --- a/paddle/cuda/include/hl_sequence.h +++ 
b/paddle/cuda/include/hl_sequence.h @@ -143,7 +143,7 @@ extern void hl_context_projection_backward_weight(real* outputGrad, */ extern void hl_sequence2batch_copy(real *batch, real *sequence, - int *batchIndex, + const int *batchIndex, int seqWidth, int batchCount, bool seq2batch); diff --git a/paddle/cuda/include/stub/hl_sequence_stub.h b/paddle/cuda/include/stub/hl_sequence_stub.h index 417f40e0a69f6..aabd956c37f7d 100644 --- a/paddle/cuda/include/stub/hl_sequence_stub.h +++ b/paddle/cuda/include/stub/hl_sequence_stub.h @@ -62,7 +62,7 @@ inline void hl_context_projection_backward_weight(real* outputGrad, inline void hl_sequence2batch_copy(real *batch, real *sequence, - int *batchIndex, + const int *batchIndex, int seqWidth, int batchCount, bool seq2batch) {} diff --git a/paddle/cuda/src/hl_cuda_sequence.cu b/paddle/cuda/src/hl_cuda_sequence.cu index e028880156e5b..63824eaa4c201 100644 --- a/paddle/cuda/src/hl_cuda_sequence.cu +++ b/paddle/cuda/src/hl_cuda_sequence.cu @@ -374,7 +374,7 @@ template __global__ void KeSequence2Batch(real *batch, real *sequence, - int *batchIndex, + const int *batchIndex, int seqWidth, int batchCount) { int idx = threadIdx.x; @@ -405,7 +405,7 @@ void KeSequence2Batch(real *batch, void hl_sequence2batch_copy(real *batch, real *sequence, - int *batchIndex, + const int *batchIndex, int seqWidth, int batchCount, bool seq2batch) { diff --git a/paddle/gserver/layers/AgentLayer.cpp b/paddle/gserver/layers/AgentLayer.cpp index 056e9568852ac..5e07446c71ff6 100644 --- a/paddle/gserver/layers/AgentLayer.cpp +++ b/paddle/gserver/layers/AgentLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "AgentLayer.h" #include "paddle/utils/Logging.h" @@ -62,8 +61,8 @@ void SequenceAgentLayer::forward(PassType passType) { // get Arguments from real layers if (numSamples_ > 0 && numSamples_ < realNumSequences) { - int numRows = realOutput.sequenceStartPositions-> - getData(false)[numSamples_]; + int numRows = + realOutput.sequenceStartPositions->getData(false)[numSamples_]; CHECK(!realOutput.ids) << "Not supported"; output_.subArgFrom(realOutput, /* offset */ 0, numRows, getSize(), useGpu_, /* trans */ false, /* seqFlag */ true, @@ -141,8 +140,8 @@ void ScatterAgentLayer::forward(PassType passType) { int width = this->getSize(); if (realOutArg_.value || realOutArg_.ids) { - output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_, - width, useGpu_); + output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_, width, + useGpu_); } else { // used in generation if (realLayer_->getOutput().ids) { IVector::resizeOrCreate(output_.ids, ids_->getSize(), useGpu_); @@ -224,8 +223,8 @@ void SequenceScatterAgentLayer::forward(PassType passType) { if (realOutArg_.value || realOutArg_.ids) { CHECK(realOutArg_.sequenceStartPositions); - output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_, - width, useGpu_, /* trans */ false, /* seqFlag */ true, + output_.subArgFrom(realOutArg_, /* offset */ idIndex_, idSize_, width, + useGpu_, /* trans */ false, /* seqFlag */ true, /* seqStart */ seqStartPosIndex_, /* seqSize */ numSequences_); } else { @@ -249,11 +248,12 @@ void SequenceScatterAgentLayer::forward(PassType passType) { CHECK_NE(input.sequenceStartPositions.get(), output_.sequenceStartPositions.get()); ICpuGpuVector::resizeOrCreate(output_.sequenceStartPositions, - numSequences + 1, false); + numSequences + 1, false); int* outStarts = output_.sequenceStartPositions->getMutableData(false); - IVector::resizeOrCreate(cpuInputStartPos_, height, false); - int* inStarts = cpuInputStartPos_->getData(); + ICpuGpuVector::resizeOrCreate(inputStartPos_, height, false); + int* inStarts = inputStartPos_->getMutableData(false); + size_t offsetOut = 0; for (size_t i = 0; i < numSequences; ++i) { outStarts[i] = offsetOut; @@ -266,13 +266,8 @@ void SequenceScatterAgentLayer::forward(PassType passType) { } outStarts[numSequences] = offsetOut; - if (useGpu_) { - IVector::resizeOrCreate(inputStartPos_, height, true); - inputStartPos_->copyFrom(*cpuInputStartPos_, HPPL_STREAM_DEFAULT); - } else { - inputStartPos_ = cpuInputStartPos_; - } - outputValue->copyByRowIndex(*input.value, *inputStartPos_); + outputValue->copyByRowIndex(*input.value, + *inputStartPos_->getVector(useGpu_)); } } diff --git a/paddle/gserver/layers/AgentLayer.h b/paddle/gserver/layers/AgentLayer.h index d82078dd93329..3d7bf55834070 100644 --- a/paddle/gserver/layers/AgentLayer.h +++ b/paddle/gserver/layers/AgentLayer.h @@ -191,11 +191,7 @@ class SequenceScatterAgentLayer : public ScatterAgentLayer { protected: // use to store expanded cpuStartPositions or subSequenceStartPositions // of real layer. 
- IVectorPtr cpuInputStartPos_; - - // point to cpuInputStartPos_ when useGpu_ is false - // copy from cpuInputStartPos_ when useGpu_ is true - IVectorPtr inputStartPos_; + ICpuGpuVectorPtr inputStartPos_; public: explicit SequenceScatterAgentLayer(const LayerConfig& config) diff --git a/paddle/gserver/layers/ExpandLayer.cpp b/paddle/gserver/layers/ExpandLayer.cpp index bbd0b53273b43..9290ce4f6d46c 100644 --- a/paddle/gserver/layers/ExpandLayer.cpp +++ b/paddle/gserver/layers/ExpandLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "ExpandLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" @@ -53,9 +52,8 @@ void ExpandLayer::forward(PassType passType) { const Argument& shapeInput = getInput(1); const Argument& dataInput = getInput(0); size_t outputBatchSize = shapeInput.getBatchSize(); - auto startPositions = - type_ ? shapeInput.subSequenceStartPositions - : shapeInput.sequenceStartPositions; + auto startPositions = type_ ? shapeInput.subSequenceStartPositions + : shapeInput.sequenceStartPositions; size_t numSequences = startPositions->getSize() - 1; const int* starts = startPositions->getData(false); @@ -71,8 +69,7 @@ void ExpandLayer::forward(PassType passType) { // set output sequence info as shape sequence output_.sequenceStartPositions = shapeInput.sequenceStartPositions; if (shapeInput.hasSubseq()) { - output_.subSequenceStartPositions = - shapeInput.subSequenceStartPositions; + output_.subSequenceStartPositions = shapeInput.subSequenceStartPositions; } // reserve output: Expand output to batchsize of sequence data. @@ -81,8 +78,8 @@ void ExpandLayer::forward(PassType passType) { MatrixPtr inputValue = getInputValue(0); MatrixPtr outputValue = getOutputValue(); - IVector::resizeOrCreate(cpuExpandStartsPos_, outputBatchSize, false); - int* expandStarts = cpuExpandStartsPos_->getData(); + ICpuGpuVector::resizeOrCreate(expandStartsPos_, outputBatchSize, false); + int* expandStarts = expandStartsPos_->getMutableData(false); for (size_t sequenceId = 0; sequenceId < numSequences; ++sequenceId) { int sequenceLength = starts[sequenceId + 1] - starts[sequenceId]; for (int j = 0; j < sequenceLength; j++) { @@ -90,15 +87,8 @@ void ExpandLayer::forward(PassType passType) { } } - if (useGpu_) { - // TODO(Dangqingqing) move copyFrom - IVector::resizeOrCreate(expandStartsPos_, outputBatchSize, true); - expandStartsPos_->copyFrom(*cpuExpandStartsPos_, HPPL_STREAM_DEFAULT); - } else { - expandStartsPos_ = cpuExpandStartsPos_; - } - - outputValue->copyByRowIndex(*inputValue, *expandStartsPos_); + outputValue->copyByRowIndex(*inputValue, + *expandStartsPos_->getVector(useGpu_)); if (biases_.get() != NULL) { outputValue->addBias(*(biases_->getW()), 1); @@ -108,16 +98,15 @@ void ExpandLayer::forward(PassType passType) { void ExpandLayer::backward(const UpdateCallback& callback) { if (biases_ && biases_->getWGrad()) { biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - /* Increasing the number of gradient */ + /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); } if (!getInputGrad(0)) return; MatrixPtr inputGrad = getInputGrad(0); MatrixPtr outputGrad = getOutputGrad(); - auto cpuSeqStartPos = - type_ ? getInput(1).subSequenceStartPositions - : getInput(1).sequenceStartPositions; + auto cpuSeqStartPos = type_ ? 
getInput(1).subSequenceStartPositions + : getInput(1).sequenceStartPositions; size_t numSequences = cpuSeqStartPos->getSize() - 1; const int* starts = cpuSeqStartPos->getData(false); diff --git a/paddle/gserver/layers/ExpandLayer.h b/paddle/gserver/layers/ExpandLayer.h index 8a3eb1c973a47..fbe0ced9b1754 100644 --- a/paddle/gserver/layers/ExpandLayer.h +++ b/paddle/gserver/layers/ExpandLayer.h @@ -44,14 +44,9 @@ class ExpandLayer : public Layer { enum ExpandLevel { kNonSeq = 0, kSeq = 1 }; /// store the ExpandLevel int type_; - // TODO(luotao) use ICpuGpuVectorPtr to merge cpuExpandStartsPos_ - // and expandStartsPos_ /// expanded sequenceStartPositions or subSequenceStartPositions /// of input[1] - IVectorPtr cpuExpandStartsPos_; - /// point to cpuExpandStartsPos_ when useGpu_ is false, - /// copy from cpuExpandStartsPos_ when useGpu_ is true - IVectorPtr expandStartsPos_; + ICpuGpuVectorPtr expandStartsPos_; public: explicit ExpandLayer(const LayerConfig& config) : Layer(config) {} diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index a6ff2f3b35d04..78519ce7aa874 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -282,13 +282,13 @@ void GpuMatrix::copyFrom(const IVector& src) { copyFrom(matrix); } -void GpuMatrix::copyByRowIndex(Matrix& b, IVector& rowIndex) { +void GpuMatrix::copyByRowIndex(Matrix& b, const IVector& rowIndex) { size_t height = getHeight(); size_t width = getWidth(); CHECK_EQ(b.getWidth(), width); real* dst = getData(); real* src = b.getData(); - int* index = rowIndex.getData(); + const int* index = rowIndex.getData(); hl_sequence2batch_copy(dst, src, index, width, height, true); } @@ -1278,11 +1278,11 @@ void CpuMatrix::copyFrom(const IVector& src) { } } -void CpuMatrix::copyByRowIndex(Matrix& b, IVector& rowIndex) { +void CpuMatrix::copyByRowIndex(Matrix& b, const IVector& rowIndex) { size_t height = getHeight(); size_t width = getWidth(); CHECK_EQ(b.getWidth(), width); - int* index = rowIndex.getData(); + const int* index = rowIndex.getData(); for (size_t i = 0; i < height; i++) { CHECK_LT(static_cast(index[i]), b.getHeight()); real* src = b.getData() + index[i] * width; diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 5c15c94012816..25104fe1c6d70 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -253,7 +253,7 @@ class Matrix : public BaseMatrix { LOG(FATAL) << "copy data from int vector only available on CpuMatrix."; } - virtual void copyByRowIndex(Matrix& b, IVector& rowIndex) { + virtual void copyByRowIndex(Matrix& b, const IVector& rowIndex) { LOG(FATAL) << "Not implemented"; } @@ -979,7 +979,7 @@ class GpuMatrix : public Matrix { void copyFrom(const IVector& src); - void copyByRowIndex(Matrix& b, IVector& rowIndex); + void copyByRowIndex(Matrix& b, const IVector& rowIndex); MatrixPtr clone(size_t height, size_t width, bool useGpu = false); @@ -1241,7 +1241,7 @@ class CpuMatrix : public Matrix { void copyFrom(CpuSparseMatrix& src); - void copyByRowIndex(Matrix& b, IVector& rowIndex); + void copyByRowIndex(Matrix& b, const IVector& rowIndex); MatrixPtr clone(size_t height, size_t width, bool useGpu = false); From c13bdb15cdabda2a68c1eca470d612f079538d27 Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 15 Oct 2016 06:54:52 -0700 Subject: [PATCH 025/180] remove redundant HPPL_TYPE_DOUBLE (#200) --- CMakeLists.txt | 2 +- paddle/cuda/include/hl_base.h | 2 +- paddle/cuda/include/hl_cpu_gru.cuh | 2 +- paddle/cuda/include/hl_gpu_functions.cuh | 4 ++-- paddle/cuda/include/hl_matrix_base.cuh | 2 +- 
paddle/cuda/include/hl_matrix_type.cuh | 4 ++-- paddle/cuda/include/hl_sse_matrix_kernel.cuh | 4 ++-- paddle/cuda/src/hl_cuda_cublas.cc | 2 +- paddle/cuda/src/hl_cuda_cudnn.cc | 10 +++++----- paddle/cuda/src/hl_cuda_device.cc | 2 +- paddle/cuda/src/hl_cuda_matrix.cu | 4 ++-- paddle/cuda/src/hl_cuda_sparse.cuh | 2 +- 12 files changed, 20 insertions(+), 20 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 44e93f22c0eaf..b85709f807bc3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,7 +104,7 @@ else() endif(NOT WITH_GPU) if(WITH_DOUBLE) - add_definitions(-DPADDLE_TYPE_DOUBLE -DHPPL_TYPE_DOUBLE) + add_definitions(-DPADDLE_TYPE_DOUBLE) set(ACCURACY double) else(WITH_DOUBLE) set(ACCURACY float) diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 77e2649b17214..1fe2774cc5a29 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -185,7 +185,7 @@ typedef struct { size_t nnz; } _hl_sparse_matrix_s, *hl_sparse_matrix_s; -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE /** * HPPL data type: real (float or double) * diff --git a/paddle/cuda/include/hl_cpu_gru.cuh b/paddle/cuda/include/hl_cpu_gru.cuh index cba1c9f30da8d..d39cf67448b4f 100644 --- a/paddle/cuda/include/hl_cpu_gru.cuh +++ b/paddle/cuda/include/hl_cpu_gru.cuh @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/math/MathFunctions.h" -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE #define CBLAS_GEMM paddle::gemm #else #define CBLAS_GEMM paddle::gemm diff --git a/paddle/cuda/include/hl_gpu_functions.cuh b/paddle/cuda/include/hl_gpu_functions.cuh index 38df4eb8958f2..a2c5ebd18a440 100644 --- a/paddle/cuda/include/hl_gpu_functions.cuh +++ b/paddle/cuda/include/hl_gpu_functions.cuh @@ -28,7 +28,7 @@ namespace hppl { const real min = SIGMOID_THRESHOLD_MIN; const real max = SIGMOID_THRESHOLD_MAX; real tmp = (a < min) ? min : ((a > max) ? max : a); -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE return __fdividef(1.0f, 1.0f + __expf(-tmp)); #else return 1.0 / (1.0 + exp(-tmp)); @@ -36,7 +36,7 @@ namespace hppl { } __device__ static real tanh(const real a) { -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE return __fdividef(2.0f, (1.0f + __expf(-2.0f*a))) - 1.0f; #else return (2.0 / (1.0 + exp(-2.0*a))) - 1.0; diff --git a/paddle/cuda/include/hl_matrix_base.cuh b/paddle/cuda/include/hl_matrix_base.cuh index 473d394c0c688..a3645ef51e6ef 100644 --- a/paddle/cuda/include/hl_matrix_base.cuh +++ b/paddle/cuda/include/hl_matrix_base.cuh @@ -30,7 +30,7 @@ limitations under the License. */ #define INLINE inline #endif -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE #define DEVICE_FMAX fmaxf #define DEVICE_FMIN fminf #else diff --git a/paddle/cuda/include/hl_matrix_type.cuh b/paddle/cuda/include/hl_matrix_type.cuh index 6917f36290141..51e483d1fb2ff 100644 --- a/paddle/cuda/include/hl_matrix_type.cuh +++ b/paddle/cuda/include/hl_matrix_type.cuh @@ -21,7 +21,7 @@ limitations under the License. 
*/ #ifdef __CUDA_ARCH__ // typedef void* vecType; #include -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE typedef float4 vecType; #else typedef double2 vecType; @@ -30,7 +30,7 @@ typedef double2 vecType; #include #include #include -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE typedef __m128 vecType; #else typedef __m128d vecType; diff --git a/paddle/cuda/include/hl_sse_matrix_kernel.cuh b/paddle/cuda/include/hl_sse_matrix_kernel.cuh index c90d49e4adeb5..45db2f313e0d6 100644 --- a/paddle/cuda/include/hl_sse_matrix_kernel.cuh +++ b/paddle/cuda/include/hl_sse_matrix_kernel.cuh @@ -20,7 +20,7 @@ limitations under the License. */ #define VECTOR_SIZE 16 -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE /* number of float in vector */ #define VECTOR_LEN 4 #define VECTOR_SET _mm_set_ps1 @@ -41,7 +41,7 @@ inline bool hl_check_align(void *ptr) { return hl_check_align(reinterpret_cast(ptr)); } -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE template inline real hl_agg_op(Agg agg, vecType mm) { __m128 lo = _mm_unpacklo_ps(mm, mm); diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/cuda/src/hl_cuda_cublas.cc index dc109487ded20..b3c9001ba3973 100644 --- a/paddle/cuda/src/hl_cuda_cublas.cc +++ b/paddle/cuda/src/hl_cuda_cublas.cc @@ -84,7 +84,7 @@ CUBLAS_BLAS_ROUTINE_EACH(DYNAMIC_LOAD_CUBLAS_V2_WRAP) } /* namespace dynload */ -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE #define CUBLAS_GEAM dynload::cublasSgeam #define CUBLAS_GEMV dynload::cublasSgemv #define CUBLAS_GEMM dynload::cublasSgemm diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index c2dce1977bdf5..b215c0f6e33a1 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -340,7 +340,7 @@ void hl_create_tensor_descriptor(hl_tensor_descriptor* image_desc, (cudnn_tensor_descriptor)malloc(sizeof(_cudnn_tensor_descriptor)); CHECK_NOTNULL(hl_desc); -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE cudnnDataType_t data_type = CUDNN_DATA_FLOAT; #else cudnnDataType_t data_type = CUDNN_DATA_DOUBLE; @@ -373,7 +373,7 @@ void hl_create_tensor_descriptor(hl_tensor_descriptor* image_desc) { (cudnn_tensor_descriptor)malloc(sizeof(_cudnn_tensor_descriptor)); CHECK_NOTNULL(hl_desc); -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE cudnnDataType_t data_type = CUDNN_DATA_FLOAT; #else cudnnDataType_t data_type = CUDNN_DATA_DOUBLE; @@ -611,7 +611,7 @@ void hl_create_filter_descriptor(hl_filter_descriptor* filter, CHECK_CUDNN(dynload::cudnnCreateFilterDescriptor(&hl_filter->desc)); -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE cudnnDataType_t data_type = CUDNN_DATA_FLOAT; #else cudnnDataType_t data_type = CUDNN_DATA_DOUBLE; @@ -921,7 +921,7 @@ void hl_softmax_forward(real *input, int height, int width) { -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE cudnnDataType_t data_type = CUDNN_DATA_FLOAT; #else cudnnDataType_t data_type = CUDNN_DATA_DOUBLE; @@ -955,7 +955,7 @@ void hl_softmax_backward(real *output_value, int height, int width) { -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE cudnnDataType_t data_type = CUDNN_DATA_FLOAT; #else cudnnDataType_t data_type = CUDNN_DATA_DOUBLE; diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc index f4c07367b485b..e9fe9f1c117a0 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/cuda/src/hl_cuda_device.cc @@ -626,7 +626,7 @@ void hl_specify_devices_start(int* device, int number) { void hl_rand(real *dest_d, size_t num) { pthread_mutex_lock(t_resource.gen_mutex); 
CHECK_EQ( -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE dynload::curandGenerateUniform(t_resource.gen, dest_d, num), #else dynload::curandGenerateUniformDouble(t_resource.gen, dest_d, num), diff --git a/paddle/cuda/src/hl_cuda_matrix.cu b/paddle/cuda/src/hl_cuda_matrix.cu index 38e4f16217c2a..067e68c41e119 100644 --- a/paddle/cuda/src/hl_cuda_matrix.cu +++ b/paddle/cuda/src/hl_cuda_matrix.cu @@ -47,7 +47,7 @@ void hl_matrix_add(real *A_d, CHECK_SYNC("hl_matrix_add failed"); } -#ifdef HPPL_TYPE_DOUBLE +#ifdef PADDLE_TYPE_DOUBLE #define THRESHOLD 128 #else #define THRESHOLD 64 @@ -102,7 +102,7 @@ void subMaxAndExp(real* I, val = -THRESHOLD; } I[nextIdx] = val; -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE O[nextIdx] = __expf(val); #else O[nextIdx] = exp(val); diff --git a/paddle/cuda/src/hl_cuda_sparse.cuh b/paddle/cuda/src/hl_cuda_sparse.cuh index 13e89390d68c2..c3b98f4ebc38d 100644 --- a/paddle/cuda/src/hl_cuda_sparse.cuh +++ b/paddle/cuda/src/hl_cuda_sparse.cuh @@ -355,7 +355,7 @@ __global__ void KeSMatrixCscMulDense(real *C_d, } /* best perf */ -#ifndef HPPL_TYPE_DOUBLE +#ifndef PADDLE_TYPE_DOUBLE #define CU_CSCMM_THREAD_M_BEST 9 #else #define CU_CSCMM_THREAD_M_BEST 4 From 6d21ecef545a5c400e2e9a3ef812404132fb88ee Mon Sep 17 00:00:00 2001 From: Zrachel Date: Mon, 17 Oct 2016 10:18:30 +0800 Subject: [PATCH 026/180] add cost_type constraint to weighted_cost interface (#206) --- python/paddle/trainer/config_parser.py | 1 - python/paddle/trainer_config_helpers/layers.py | 15 +++++---------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index fb47fd0c6f0c3..18f0b1b4e497e 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1715,7 +1715,6 @@ def init(cls, name, inputs, device=None, coeff=1.): g_cost_map[cost_type] = cls define_cost('MultiClassCrossEntropy', 'multi-class-cross-entropy') -define_cost('ClassificationErrorLayer', 'classification_error') define_cost('RankingCost', 'rank-cost') define_cost('AucValidation', 'auc-validation') define_cost('PnpairValidation', 'pnpair-validation') diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 745e61b2eb0bc..686704cb7c9b0 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -2799,7 +2799,7 @@ def __cost_input__(input, label, weight=None): @wrap_name_default() -def regression_cost(input, label, weight=None, cost='square_error', name=None): +def regression_cost(input, label, weight=None, name=None): """ Regression Layer. @@ -2814,21 +2814,18 @@ def regression_cost(input, label, weight=None, cost='square_error', name=None): :param weight: The weight affects the cost, namely the scale of cost. It is an optional argument. :type weight: LayerOutput - :param cost: Cost method. - :type cost: basestring :return: LayerOutput object. 
:rtype: LayerOutput """ ipts, parents = __cost_input__(input, label, weight) - Layer(inputs=ipts, type=cost, name=name) + Layer(inputs=ipts, type="square_error", name=name) return LayerOutput(name, LayerType.COST, parents=parents) @wrap_name_default("cost") @layer_support() def classification_cost(input, label, weight=None, name=None, - cost="multi-class-cross-entropy", evaluator=classification_error_evaluator, layer_attr=None): """ @@ -2843,8 +2840,6 @@ def classification_cost(input, label, weight=None, name=None, :param weight: The weight affects the cost, namely the scale of cost. It is an optional argument. :type weight: LayerOutput - :param cost: cost method. - :type cost: basestring :param evaluator: Evaluator method. :param layer_attr: layer's extra attribute. :type layer_attr: ExtraLayerAttribute @@ -2857,7 +2852,7 @@ def classification_cost(input, label, weight=None, name=None, ipts, parents = __cost_input__(input, label, weight) - Layer(name=name, type=cost, inputs=ipts, + Layer(name=name, type="multi-class-cross-entropy", inputs=ipts, **ExtraLayerAttribute.to_kwargs(layer_attr)) def __add_evaluator__(e): @@ -3819,8 +3814,8 @@ def multi_binary_label_cross_entropy(input, label, name=None, coeff=1.0): if input.activation is None or \ not isinstance(input.activation, SigmoidActivation): logger.log(logging.WARN, - "%s is not recommend for batch normalization's activation, " - "maybe the relu is better" % repr(input.activation)) + "%s is not recommend for multi_binary_label_cross_entropy's activation, " + "maybe the sigmoid is better" % repr(input.activation)) Layer(name=name, type=LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY, From 4e43a59a183cb09682dcf6ed1506d6210c06a7b7 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Mon, 17 Oct 2016 11:03:58 +0800 Subject: [PATCH 027/180] remove unmerged internal documents (#205) --- paddle/internals/scripts/docs.sh | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100755 paddle/internals/scripts/docs.sh diff --git a/paddle/internals/scripts/docs.sh b/paddle/internals/scripts/docs.sh deleted file mode 100755 index 517405c120a47..0000000000000 --- a/paddle/internals/scripts/docs.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright (c) 2016 Baidu, Inc. All Rights Reserved - -cd `dirname $0` - -# Add set -e, cd to directory. -set -e -mkdir -p $PWD/../../../build -cd $PWD/../../../build - -# Compile Documentation only. -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=ON -make paddle_docs paddle_docs_cn - -# remove old docs. 
mv new docs in deeplearning.baidu.com -scp -r doc/html paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com:/home/paddle_doc/www/doc_new -ssh paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com "cd ~/www/ && rm -r doc && mv doc_new doc" - -scp -r doc_cn/html paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com:/home/paddle_doc/www/doc_cn_new -ssh paddle_doc@yq01-idl-gpu-offline42.yq01.baidu.com "cd ~/www/ && rm -r doc_cn && mv doc_cn_new doc_cn" From e4952ca6cea9130258086b4b7f2dff7a8bf5f566 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 17 Oct 2016 03:30:10 +0000 Subject: [PATCH 028/180] Add FAQ (#128) * Init commit for doing FAQ * Add speed up training * Add graphviz to ci * Add shared paramter * Tiny refine --- .travis.yml | 1 + doc_cn/conf.py.in | 1 + doc_cn/faq/index.rst | 169 ++++++++++++++++++ doc_cn/faq/reduce_min_pool_size.py | 6 + doc_cn/faq/word2vec_config.py | 8 + doc_cn/faq/word2vec_dataprovider.py | 8 + doc_cn/index.rst | 7 + .../paddle/trainer_config_helpers/networks.py | 4 +- .../tests/configs/check.md5 | 2 + .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/shared_fc.py | 22 +++ .../tests/configs/shared_lstm.py | 29 +++ 12 files changed, 256 insertions(+), 3 deletions(-) create mode 100644 doc_cn/faq/index.rst create mode 100644 doc_cn/faq/reduce_min_pool_size.py create mode 100644 doc_cn/faq/word2vec_config.py create mode 100644 doc_cn/faq/word2vec_dataprovider.py create mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_fc.py create mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py diff --git a/.travis.yml b/.travis.yml index 119d01a4fa8fd..bf0e0b7bbddd4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,7 @@ addons: - libgoogle-glog-dev - libgflags-dev - libgtest-dev + - graphviz before_install: - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi diff --git a/doc_cn/conf.py.in b/doc_cn/conf.py.in index 391f7981eab80..93242ace40600 100644 --- a/doc_cn/conf.py.in +++ b/doc_cn/conf.py.in @@ -47,6 +47,7 @@ extensions = [ 'sphinx.ext.autosummary', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', + 'sphinx.ext.graphviz' ] table_styling_embed_css = True diff --git a/doc_cn/faq/index.rst b/doc_cn/faq/index.rst new file mode 100644 index 0000000000000..283607957ce63 --- /dev/null +++ b/doc_cn/faq/index.rst @@ -0,0 +1,169 @@ +#################### +PaddlePaddle常见问题 +#################### + +.. contents:: + +1. 如何减少PaddlePaddle的内存占用 +--------------------------------- + +神经网络的训练本身是一个非常消耗内存和显存的工作。经常会消耗数十G的内存和数G的显存。 +PaddlePaddle的内存占用主要分为如下几个方面\: + +* DataProvider缓冲池内存 (只针对内存) +* 神经元激活内存 (针对内存和显存) +* 参数内存 (针对内存和显存) +* 其他内存杂项 + +这其中,其他内存杂项是指PaddlePaddle本身所用的一些内存,包括字符串分配,临时变量等等, +这些内存就不考虑如何缩减了。 + +其他的内存的减少方法依次为 + + +减少DataProvider缓冲池内存 +++++++++++++++++++++++++++ + +PyDataProvider使用的是异步加载,同时在内存里直接随即选取数据来做Shuffle。即 + +.. graphviz:: + + digraph { + rankdir=LR; + 数据文件 -> 内存池 -> PaddlePaddle训练 + } + +所以,减小这个内存池即可减小内存占用,同时也可以加速开始训练前数据载入的过程。但是,这 +个内存池实际上决定了shuffle的粒度。所以,如果将这个内存池减小,又要保证数据是随机的, +那么最好将数据文件在每次读取之前做一次shuffle。可能的代码为 + +.. 
literalinclude:: reduce_min_pool_size.py + +这样做可以极大的减少内存占用,并且可能会加速训练过程。 详细文档参考 `这里 +<../ui/data_provider/pydataprovider2.html#provider>`_ 。 + +神经元激活内存 +++++++++++++++ + +神经网络在训练的时候,会对每一个激活暂存一些数据,包括激活,參差等等。 +在反向传递的时候,这些数据会被用来更新参数。这些数据使用的内存主要和两个参数有关系, +一是batch size,另一个是每条序列(Sequence)长度。所以,其实也是和每个mini-batch中包含 +的时间步信息成正比。 + +所以,做法可以有两种。他们是 + +* 减小batch size。 即在网络配置中 :code:`settings(batch_size=1000)` 设置成一个小一些的值。但是batch size本身是神经网络的超参数,减小batch size可能会对训练结果产生影响。 +* 减小序列的长度,或者直接扔掉非常长的序列。比如,一个数据集大部分序列长度是100-200, + 但是突然有一个10000长的序列,就很容易导致内存超限。特别是在LSTM等RNN中。 + +参数内存 +++++++++ + +PaddlePaddle支持非常多的优化算法(Optimizer),不同的优化算法需要使用不同大小的内存。 +例如如果使用 :code:`adadelta` 算法,则需要使用参数规模大约5倍的内存。 如果参数保存下来的 +文件为 :code:`100M`, 那么该优化算法至少需要 :code:`500M` 的内存。 + +可以考虑使用一些优化算法,例如 :code:`momentum`。 + +2. 如何加速PaddlePaddle的训练速度 +--------------------------------- + +PaddlePaddle是神经网络训练平台,加速PaddlePaddle训练有如下几个方面\: + +* 减少数据载入的耗时 +* 加速训练速度 +* 利用更多的计算资源 + +减少数据载入的耗时 +++++++++++++++++++ + +使用 :code:`pydataprovider`时,可以减少缓存池的大小,同时设置内存缓存功能,即可以极大的加速数据载入流程。 +:code:`DataProvider` 缓存池的减小,和之前减小通过减小缓存池来减小内存占用的原理一致。 + +.. literalinclude:: reduce_min_pool_size.py + +同时 :code:`@provider` 接口有一个 :code:`cache` 参数来控制缓存方法,将其设置成 :code:`CacheType.CACHE_PASS_IN_MEM` 的话,会将第一个 :code:`pass` (过完所有训练数据即为一个pass)生成的数据缓存在内存里,在之后的 :code:`pass` 中,不会再从 :code:`python` 端读取数据,而是直接从内存的缓存里读取数据。这也会极大减少数据读入的耗时。 + + +加速训练速度 +++++++++++++ + +PaddlePaddle支持Sparse的训练,sparse训练需要训练特征是 :code:`sparse_binary_vector` 、 :code:`sparse_vector` 、或者 :code:`integer_value` 的任一一种。同时,与这个训练数据交互的Layer,需要将其Parameter设置成 sparse 更新模式,即设置 :code:`sparse_update=True` + +这里使用简单的 :code:`word2vec` 训练语言模型距离,具体使用方法为\: + +使用一个词前两个词和后两个词,来预测这个中间的词。这个任务的DataProvider为\: + +.. literalinclude:: word2vec_dataprovider.py + +这个任务的配置为\: + +.. literalinclude:: word2vec_config.py + +更多关于sparse训练的内容请参考 `sparse训练的文档 `_ + +利用更多的计算资源 +++++++++++++++++++ + +利用更多的计算资源可以分为一下几个方式来进行\: + +* 单机CPU训练 + * 使用多线程训练。设置命令行参数 :code:`trainer_count`,即可以设置参与训练的线程数量。使用方法为 :code:`paddle train --trainer_count=4` +* 单机GPU训练 + * 使用显卡训练。设置命令行参数 :code:`use_gpu`。 使用方法为 :code:`paddle train --use_gpu=true` + * 使用多块显卡训练。设置命令行参数 :code:`use_gpu` 和 :code:`trainer_count`。使用 :code:`--use_gpu=True` 开启GPU训练,使用 :code:`trainer_count` 指定显卡数量。使用方法为 :code:`paddle train --use_gpu=true --trainer_count=4` +* 多机训练 + * 使用多机训练的方法也比较简单,需要先在每个节点启动 :code:`paddle pserver`,在使用 :code:`paddle train --pservers=192.168.100.1,192.168.100.2` 来指定每个pserver的ip地址 + * 具体的多机训练方法参考 `多机训练 `_ 文档。 + + +3. 遇到“非法指令”或者是“illegal instruction” +-------------------------------------------- + +paddle在进行计算的时候为了提升计算性能,使用了avx指令。部分老的cpu型号无法支持这样的指令。通常来说执行下grep avx /proc/cpuinfo看看是否有输出即可知道是否支持。(另:用此方法部分虚拟机可能检测到支持avx指令但是实际运行会挂掉,请当成是不支持,看下面的解决方案) + +解决办法是\: + +* 使用 NO_AVX的 `安装包 <../build_and_install/index.html>`_ 或者 `Docker image <../build_and_install/install/docker_install.html>`_ +* 或者,使用 :code:`-DWITH_AVX=OFF` 重新编译PaddlePaddle。 + + +4. 如何选择SGD算法的学习率 +-------------------------- + +在采用sgd/async_sgd进行训练时,一个重要的问题是选择正确的learning_rate。如果learning_rate太大,那么训练有可能不收敛,如果learning_rate太小,那么收敛可能很慢,导致训练时间过长。 + +通常做法是从一个比较大的learning_rate开始试,如果不收敛,那减少学习率10倍继续试验,直到训练收敛为止。那么如何判断训练不收敛呢?可以估计出如果模型采用不变的输出最小的cost0是多少。 + +如果训练过程的的cost明显高于这个常数输出的cost,那么我们可以判断为训练不收敛。举一个例子,假如我们是三分类问题,采用multi-class-cross-entropy作为cost,数据中0,1,2三类的比例为 :code:`0.2, 0.5, 0.3` , 那么常数输出所能达到的最小cost是 :code:`-(0.2*log(0.2)+0.5*log(0.5)+0.3*log(0.3))=1.03` 。如果训练一个pass(或者更早)后,cost还大于这个数,那么可以认为训练不收敛,应该降低学习率。 + + +5. 
如何初始化参数 +----------------- + +默认情况下,PaddlePaddle使用均值0,标准差为 :math:`\frac{1}{\sqrt{d}}` 来初始化参数。其中 :math:`d` 为参数矩阵的宽度。这种初始化方式在一般情况下不会产生很差的结果。如果用户想要自定义初始化方式,PaddlePaddle目前提供两种参数初始化的方式\: + +* 高斯分布。将 :code:`param_attr` 设置成 :code:`param_attr=ParamAttr(initial_mean=0.0, initial_std=1.0)` +* 均匀分布。将 :code:`param_attr` 设置成 :code:`param_attr=ParamAttr(initial_max=1.0, initial_min=-1.0)` + +比如设置一个全连接层的参数初始化方式和bias初始化方式,可以使用如下代码。 + +.. code-block:: python + + hidden = fc_layer(input=ipt, param_attr=ParamAttr(initial_max=1.0, initial_min=-1.0), + bias_attr=ParamAttr(initial_mean=1.0, initial_std=0.0)) + +上述代码将bias全部初始化为1.0, 同时将参数初始化为 :code:`[1.0, -1.0]` 的均匀分布。 + +6. 如何共享参数 +--------------- + +PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字的参数,会共享参数。设置参数的名字,可以使用 :code:`ParamAttr(name="YOUR_PARAM_NAME")` 来设置。更方便的设置方式,是想要共享的参数使用同样的 :code:`ParamAttr` 对象。 + +简单的全连接网络,参数共享的配置示例为\: + +.. literalinclude:: ../../python/paddle/trainer_config_helpers/tests/configs/shared_fc.py + +这里 :code:`hidden_a` 和 :code:`hidden_b` 使用了同样的parameter和bias。并且softmax层的两个输入也使用了同样的参数 :code:`softmax_param`。 + + diff --git a/doc_cn/faq/reduce_min_pool_size.py b/doc_cn/faq/reduce_min_pool_size.py new file mode 100644 index 0000000000000..2811b134b66b1 --- /dev/null +++ b/doc_cn/faq/reduce_min_pool_size.py @@ -0,0 +1,6 @@ +@provider(min_pool_size=0, ...) +def process(settings, filename): + os.system('shuf %s > %s.shuf' % (filename, filename)) # shuffle before. + with open('%s.shuf' % filename, 'r') as f: + for line in f: + yield get_sample_from_line(line) \ No newline at end of file diff --git a/doc_cn/faq/word2vec_config.py b/doc_cn/faq/word2vec_config.py new file mode 100644 index 0000000000000..e347252476eab --- /dev/null +++ b/doc_cn/faq/word2vec_config.py @@ -0,0 +1,8 @@ +... # the settings and define data provider is omitted. +DICT_DIM=3000 # dictionary dimension. +word_ids=data_layer('word_ids', size=DICT_DIM) + +emb = embedding_layer(input=word_ids, size=256, param_attr=ParamAttr(sparse_update=True)) +emb_sum = pooling_layer(input=emb, pooling_type=SumPooling()) +predict = fc_layer(input=emb_sum, size=DICT_DIM, act=Softmax()) +outputs(classification_cost(input=predict, label=data_layer('label', size=DICT_DIM))) \ No newline at end of file diff --git a/doc_cn/faq/word2vec_dataprovider.py b/doc_cn/faq/word2vec_dataprovider.py new file mode 100644 index 0000000000000..a0a39080cece9 --- /dev/null +++ b/doc_cn/faq/word2vec_dataprovider.py @@ -0,0 +1,8 @@ +DICT_DIM=3000 +@provider(input_types=[integer_sequence(DICT_DIM), integer_value(DICT_DIM)]) +def process(settings, filename): + with open(filename) as f: + # yield word ids to predict inner word id + # such as [28, 29, 10, 4], 4 + # It means the sentance is 28, 29, 4, 10, 4. 
+ yield read_next_from_file(f) \ No newline at end of file diff --git a/doc_cn/index.rst b/doc_cn/index.rst index 1a4908be14684..d2d50fbdb47f2 100644 --- a/doc_cn/index.rst +++ b/doc_cn/index.rst @@ -3,6 +3,7 @@ PaddlePaddle文档 使用指南 -------- + * `快速入门 `_ * `编译与安装 `_ * `用户接口 `_ @@ -16,7 +17,13 @@ PaddlePaddle文档 算法教程 -------- + * `Recurrent Group教程 `_ * `单层RNN示例 <../doc/algorithm/rnn/rnn.html>`_ * `双层RNN示例 `_ * `支持双层序列作为输入的Layer `_ + +常见问题 +-------- + +* `常见问题 `_ diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index ab4057d9d6c6b..c54ec3096989c 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -20,7 +20,7 @@ IdentityActivation, TanhActivation, SequenceSoftmaxActivation from attrs import ExtraAttr from default_decorators import wrap_name_default, wrap_act_default, \ - wrap_param_default + wrap_param_default, wrap_bias_attr_default, wrap_param_attr_default from layers import * # There are too many layers used in network, so import * from poolings import MaxPooling, SumPooling from paddle.trainer.config_parser import * @@ -505,7 +505,7 @@ def simple_lstm(input, size, name=None, reverse=False, mat_param_attr=None, def lstmemory_unit(input, name=None, size=None, param_attr=None, act=None, gate_act=None, state_act=None, mixed_bias_attr=None, lstm_bias_attr=None, - mixed_layer_attr=None,lstm_layer_attr=None, + mixed_layer_attr=None, lstm_layer_attr=None, get_output_layer_attr=None): """ Define calculations that a LSTM unit performs in a single time step. diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 3ecfff2071630..96bf3fb2e19d6 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -2,6 +2,8 @@ a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 9c038249ec8ff719753a746cdb04c026 layer_activations.protostr 5913f87b39cee3b2701fa158270aca26 projections.protostr +7334ba0a4544f0623231330fc51d390d shared_fc.protostr +8b8b6bb128a7dfcc937be86145f53e2f shared_lstm.protostr 6b39e34beea8dfb782bee9bd3dea9eb5 simple_rnn_layers.protostr 0fc1409600f1a3301da994ab9d28b0bf test_cost_layers.protostr 6cd5f28a3416344f20120698470e0a4c test_cost_layers_with_weight.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 5514ee65e5a62..7cdd682056fd4 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -8,7 +8,7 @@ configs=(test_fc layer_activations projections test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers -test_cost_layers_with_weight test_rnn_group) +test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py new file mode 100644 index 0000000000000..202cf367fc7f2 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.py @@ -0,0 +1,22 @@ +from paddle.trainer_config_helpers import * + +settings( + 
learning_rate=1e-4, + batch_size=1000 +) + +a = data_layer(name='feature_a', size=200) +b = data_layer(name='feature_b', size=200) + +fc_param = ParamAttr(name='fc_param', initial_max=1.0, initial_min=-1.0) +bias_param = ParamAttr(name='bias_param', initial_mean=0.0, initial_std=0.0) + +softmax_param = ParamAttr(name='softmax_param', initial_max=1.0, initial_min=-1.0) + +hidden_a = fc_layer(input=a, size=200, param_attr=fc_param, bias_attr=bias_param) +hidden_b = fc_layer(input=b, size=200, param_attr=fc_param, bias_attr=bias_param) + +predict = fc_layer(input=[hidden_a, hidden_b], param_attr=[softmax_param, softmax_param], + bias_attr=False, size=10, act=SoftmaxActivation()) + +outputs(classification_cost(input=predict, label=data_layer(name='label', size=10))) diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py new file mode 100644 index 0000000000000..8557e9daaf66a --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.py @@ -0,0 +1,29 @@ +from paddle.trainer_config_helpers import * + +settings(learning_rate=1e-4, batch_size=1000) + +data_1 = data_layer(name='data_a', size=100) +data_2 = data_layer(name='data_b', size=100) + +mixed_param = ParamAttr(name='mixed_param') + +with mixed_layer(size=400, bias_attr=False) as m1: + m1 += full_matrix_projection(input=data_1, param_attr=mixed_param) + +with mixed_layer(size=400, bias_attr=False) as m2: + m2 += full_matrix_projection(input=data_2, param_attr=mixed_param) + +lstm_param = ParamAttr(name='lstm_param') +lstm_bias = ParamAttr(name='lstm_bias', initial_mean=0., initial_std=0.) + +lstm1 = lstmemory_group(input=m1, param_attr=lstm_param, lstm_bias_attr=lstm_bias, mixed_bias_attr=False) +lstm2 = lstmemory_group(input=m2, param_attr=lstm_param, lstm_bias_attr=lstm_bias, mixed_bias_attr=False) + +softmax_param = ParamAttr(name='softmax_param') + +predict = fc_layer(input=[last_seq(input=lstm1), last_seq(input=lstm2)], + size=10, + param_attr=[softmax_param, softmax_param], + bias_attr=False, + act=SoftmaxActivation()) +outputs(classification_cost(input=predict, label=data_layer(name='label', size=10))) From 2f82d72ede17822f52a789e92afca6f8112bc44e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 17 Oct 2016 07:17:08 +0000 Subject: [PATCH 029/180] Fix bug in yield dictionary in DataProvider. (#197) * Fix bug in yield dictionary in DataProvider. * Also make virtualenv work in Paddle. 
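
With this change, input_types in the @provider decorator may also be a dict keyed
by data_layer name, and the generator may yield a matching dict; the fields are then
reordered to the network's input order automatically. A minimal sketch, following the
mnist provider updated in this patch (read_samples is a placeholder for whatever
routine reads one sample, and the dict keys must match your data_layer names):

    @provider(input_types={'pixel': dense_vector(28 * 28),
                           'label': integer_value(10)})
    def process(settings, filename):
        for pixels, label in read_samples(filename):  # placeholder reader
            yield {'pixel': pixels, 'label': label}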
--- CMakeLists.txt | 2 +- cmake/util.cmake | 17 ++++++++ demo/mnist/data/get_mnist_data.sh | 0 demo/mnist/mnist_provider.py | 19 ++++----- demo/mnist/vgg_16_mnist.py | 1 + .../ui/data_provider/mnist_provider.dict.py | 10 ++--- doc_cn/ui/data_provider/pydataprovider2.rst | 2 - .../gserver/dataproviders/PyDataProvider2.cpp | 3 +- paddle/gserver/tests/test_PyDataProvider2.cpp | 2 +- paddle/gserver/tests/test_PyDataProvider2.py | 2 +- paddle/utils/.gitignore | 1 + paddle/utils/CMakeLists.txt | 6 ++- paddle/utils/PythonUtil.cpp | 31 ++++++++++---- paddle/utils/PythonUtil.h | 2 + paddle/utils/enable_virtualenv.py | 10 +++++ python/paddle/trainer/PyDataProvider2.py | 24 ++++++----- python/paddle/trainer/config_parser.py | 4 ++ .../paddle/trainer_config_helpers/networks.py | 42 ++++++++++++++----- 18 files changed, 126 insertions(+), 52 deletions(-) mode change 100644 => 100755 demo/mnist/data/get_mnist_data.sh create mode 100644 paddle/utils/.gitignore create mode 100644 paddle/utils/enable_virtualenv.py diff --git a/CMakeLists.txt b/CMakeLists.txt index b85709f807bc3..4613155f7700b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 2.8) project(paddle CXX C) set(PADDLE_MAJOR_VERSION 0) set(PADDLE_MINOR_VERSION 8) -set(PADDLE_PATCH_VERSION 0b1) +set(PADDLE_PATCH_VERSION 0b2) set(PADDLE_VERSION ${PADDLE_MAJOR_VERSION}.${PADDLE_MINOR_VERSION}.${PADDLE_PATCH_VERSION}) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") diff --git a/cmake/util.cmake b/cmake/util.cmake index d776c3ae49952..0fa36f070cc11 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -184,3 +184,20 @@ macro(add_paddle_culib TARGET_NAME) cuda_add_library(${TARGET_NAME} STATIC ${ARGN}) set(CUDA_NVCC_FLAGS ${NVCC_FLAG}) endmacro() + + +# Creates C resources file from files in given resource file +function(create_resources res_file output) + # Create empty output file + file(WRITE ${output} "") + # Get short filename + string(REGEX MATCH "([^/]+)$" filename ${res_file}) + # Replace filename spaces & extension separator for C compatibility + string(REGEX REPLACE "\\.| |-" "_" filename ${filename}) + # Read hex data from file + file(READ ${res_file} filedata HEX) + # Convert hex data for C compatibility + string(REGEX REPLACE "([0-9a-f][0-9a-f])" "0x\\1," filedata ${filedata}) + # Append data to output file + file(APPEND ${output} "const unsigned char ${filename}[] = {${filedata}};\nconst unsigned ${filename}_size = sizeof(${filename});\n") +endfunction() diff --git a/demo/mnist/data/get_mnist_data.sh b/demo/mnist/data/get_mnist_data.sh old mode 100644 new mode 100755 diff --git a/demo/mnist/mnist_provider.py b/demo/mnist/mnist_provider.py index 0f14ded2dce93..32af29730a736 100644 --- a/demo/mnist/mnist_provider.py +++ b/demo/mnist/mnist_provider.py @@ -2,10 +2,10 @@ # Define a py data provider -@provider(input_types=[ - dense_vector(28 * 28), - integer_value(10) -]) +@provider(input_types={ + 'pixel': dense_vector(28 * 28), + 'label': integer_value(10) +}) def process(settings, filename): # settings is not used currently. imgf = filename + "-images-idx3-ubyte" labelf = filename + "-labels-idx1-ubyte" @@ -14,20 +14,19 @@ def process(settings, filename): # settings is not used currently. 
f.read(16) l.read(8) - + # Define number of samples for train/test if "train" in filename: n = 60000 else: n = 10000 - + for i in range(n): label = ord(l.read(1)) pixels = [] - for j in range(28*28): + for j in range(28 * 28): pixels.append(float(ord(f.read(1))) / 255.0) - yield { "pixel": pixels, 'label': label } - + yield {"pixel": pixels, 'label': label} + f.close() l.close() - \ No newline at end of file diff --git a/demo/mnist/vgg_16_mnist.py b/demo/mnist/vgg_16_mnist.py index ad0a4de3215ca..45a45bb061aa7 100644 --- a/demo/mnist/vgg_16_mnist.py +++ b/demo/mnist/vgg_16_mnist.py @@ -47,6 +47,7 @@ if not is_predict: lbl = data_layer(name="label", size=label_size) + inputs(img, lbl) outputs(classification_cost(input=predict, label=lbl)) else: outputs(predict) diff --git a/doc_cn/ui/data_provider/mnist_provider.dict.py b/doc_cn/ui/data_provider/mnist_provider.dict.py index 4eab5b1fd3b50..bf13b56372b56 100644 --- a/doc_cn/ui/data_provider/mnist_provider.dict.py +++ b/doc_cn/ui/data_provider/mnist_provider.dict.py @@ -2,10 +2,10 @@ # Define a py data provider -@provider(input_types=[ - dense_vector(28 * 28), - integer_value(10) -]) +@provider(input_types={ + 'pixel': dense_vector(28 * 28), + 'label': integer_value(10) +}) def process(settings, filename): # settings is not used currently. f = open(filename, 'r') # open one of training file @@ -20,6 +20,6 @@ def process(settings, filename): # settings is not used currently. pixels_float.append(float(each_pixel_str)) # give data to paddle. - yield { "pixel": pixels_float, 'label': int(label) } + yield {"pixel": pixels_float, 'label': int(label)} f.close() # close file diff --git a/doc_cn/ui/data_provider/pydataprovider2.rst b/doc_cn/ui/data_provider/pydataprovider2.rst index 9e1d8c531f5ba..80b40084d8f50 100644 --- a/doc_cn/ui/data_provider/pydataprovider2.rst +++ b/doc_cn/ui/data_provider/pydataprovider2.rst @@ -141,8 +141,6 @@ DataProvider创建的时候执行。这个初始化函数具有如下参数: 是一个batch size,但是有时为了计算均衡性,可以将一条数据设置成多个batch size * cache 是数据缓存的策略,参考 `cache`_ * init_hook 是初始化时调用的函数,参考 `init_hook`_ -* use_dynamic_order 如果是true的话,可以返回一个dict,key是data_layer的名字,value是特征值。同时,也可以 - 返回一个list或者tuple。如果是false的话,只能够返回list或者tuple * check 设置成true的话,会根据input_types检查数据的合法性。 * check_fail_continue 如果设置成true的话,即使在check中数据不合法,也会扔到这条数据,继续训练。 如果 check是false的话,没有作用。 diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp index 2f9a1223c6e45..e3e472ac166c2 100644 --- a/paddle/gserver/dataproviders/PyDataProvider2.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp @@ -246,8 +246,7 @@ class PyDataProvider2 : public DataProvider { PyObjectPtr && kwargs) { LOG(INFO) << "loading dataprovider " << model <<"::" << className; - PyObjectPtr module(PyImport_ImportModule(model.c_str())); - CHECK_PY(module) << "Cannot imort module " << model.c_str(); + PyObjectPtr module = py::import(model); PyObjectPtr moduleDict(PyModule_GetDict(module.get())); CHECK_PY(moduleDict) << "Invoke module.__dict__ error"; PyObjectPtr cls(PyDict_GetItemString(moduleDict.get(), diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index e75e53ab7f431..6bf1e32925121 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -117,7 +117,7 @@ TEST(PyDataProvider2, index_no_seq) { } TEST(PyDataProvider2, init_hook) { - paddle::PyObjectPtr pickle(PyImport_ImportModule("pickle")); + paddle::PyObjectPtr pickle = paddle::py::import("pickle"); 
paddle::PyObjectPtr globals( PyModule_GetDict(PyImport_AddModule("__main__"))); PyDict_SetItemString(globals.get(), "pickle", pickle.get()); diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index 145fe85cff7d8..71c3335231e52 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -86,7 +86,7 @@ def test_can_over_batch_size(setting, filename): yield [random.randint(0, 100 - 1) for _ in xrange(seq_len)] -@provider(input_types=[index_slot(10), index_slot(10)]) +@provider(input_types={'input1':index_slot(10), 'input2': index_slot(10)}) def test_input_order(setting, filename): for _ in xrange(1000): yield { diff --git a/paddle/utils/.gitignore b/paddle/utils/.gitignore new file mode 100644 index 0000000000000..f2cfd7409412d --- /dev/null +++ b/paddle/utils/.gitignore @@ -0,0 +1 @@ +enable_virtualenv.c diff --git a/paddle/utils/CMakeLists.txt b/paddle/utils/CMakeLists.txt index 0557b01e36f07..45240b5002aa1 100644 --- a/paddle/utils/CMakeLists.txt +++ b/paddle/utils/CMakeLists.txt @@ -2,6 +2,9 @@ file(GLOB UTIL_HEADERS . *.h) file(GLOB UTIL_SOURCES . *.cpp) +create_resources(enable_virtualenv.py enable_virtualenv.c) +set(UTIL_RES enable_virtualenv.c) + if(APPLE) file(GLOB UTIL_ARCH_SOURCES . arch/osx/*.cpp) else() @@ -9,7 +12,8 @@ else() endif() add_library(paddle_utils STATIC ${UTIL_SOURCES} - ${UTIL_ARCH_SOURCES}) + ${UTIL_ARCH_SOURCES} + ${UTIL_RES}) add_style_check_target(paddle_utils ${UTIL_HEADERS}) add_style_check_target(paddle_utils ${UTIL_SOURCES} ${UTIL_ARCH_SOURCES}) diff --git a/paddle/utils/PythonUtil.cpp b/paddle/utils/PythonUtil.cpp index 78c3a80674f9c..90e5093f96ea4 100644 --- a/paddle/utils/PythonUtil.cpp +++ b/paddle/utils/PythonUtil.cpp @@ -77,11 +77,18 @@ static std::recursive_mutex g_pyMutex; PyGuard::PyGuard() : guard_(g_pyMutex) {} -static void printPyErrorStack(std::ostream& os, bool withEndl = false) { +static void printPyErrorStack(std::ostream& os, bool withEndl = false, + bool withPyPath = true) { PyObject * ptype, *pvalue, *ptraceback; PyErr_Fetch(&ptype, &pvalue, &ptraceback); PyErr_NormalizeException(&ptype, &pvalue, &ptraceback); PyErr_Clear(); + if (withPyPath) { + os << "Current PYTHONPATH: " << py::repr(PySys_GetObject(strdup("path"))); + if (withEndl) { + os << std::endl; + } + } PyTracebackObject* obj = (PyTracebackObject*)ptraceback; os << "Python Error: " << PyString_AsString(PyObject_Str(ptype)) @@ -114,10 +121,7 @@ PyObjectPtr callPythonFuncRetPyObj(const std::string& moduleName, const std::string& funcName, const std::vector& args) { PyGuard guard; - PyObjectPtr pyModuleName(PyString_FromString(moduleName.c_str())); - CHECK_PY(pyModuleName) << "Import PyModule failed" << moduleName; - PyObjectPtr pyModule(PyImport_Import(pyModuleName.get())); - CHECK_PY(pyModule) << "Import Python Module"<< moduleName << " failed."; + PyObjectPtr pyModule = py::import(moduleName); PyObjectPtr pyFunc(PyObject_GetAttrString(pyModule.get(), funcName.c_str())); CHECK_PY(pyFunc) << "GetAttrString failed."; PyObjectPtr pyArgs(PyTuple_New(args.size())); @@ -143,7 +147,7 @@ PyObjectPtr createPythonClass( const std::vector& args, const std::map& kwargs) { PyGuard guard; - PyObjectPtr pyModule(PyImport_ImportModule(moduleName.c_str())); + PyObjectPtr pyModule = py::import(moduleName); LOG(INFO) << "createPythonClass moduleName.c_str:" << moduleName.c_str(); CHECK_PY(pyModule) << "Import module " << moduleName << " failed."; PyObjectPtr 
pyDict(PyModule_GetDict(pyModule.get())); @@ -181,18 +185,29 @@ std::string getPyCallStack() { printPyErrorStack(os, true); return os.str(); } + +PyObjectPtr import(const std::string &moduleName) { + auto module = PyImport_ImportModule(moduleName.c_str()); + CHECK_PY(module) << "Import " << moduleName << "Error"; + return PyObjectPtr(module); +} + } // namespace py #endif - +extern "C" { +extern const char enable_virtualenv_py[]; +} void initPython(int argc, char** argv) { #ifndef PADDLE_NO_PYTHON Py_SetProgramName(argv[0]); Py_Initialize(); PySys_SetArgv(argc, argv); - // python blocks SIGINT. Need to enable it. signal(SIGINT, SIG_DFL); + + // Manually activate virtualenv when user is using virtualenv + PyRun_SimpleString(enable_virtualenv_py); #endif } diff --git a/paddle/utils/PythonUtil.h b/paddle/utils/PythonUtil.h index db02d1252b405..00fc177022ac3 100644 --- a/paddle/utils/PythonUtil.h +++ b/paddle/utils/PythonUtil.h @@ -87,6 +87,8 @@ PyObjectPtr createPythonClass(const std::string& moduleName, CHECK((x) != nullptr) << ::paddle::py::getPyCallStack() namespace py { +PyObjectPtr import(const std::string& moduleName); + /** * Cast a PyLong or PyInt to int type T. * @tparam T return type. diff --git a/paddle/utils/enable_virtualenv.py b/paddle/utils/enable_virtualenv.py new file mode 100644 index 0000000000000..99d822a4145cc --- /dev/null +++ b/paddle/utils/enable_virtualenv.py @@ -0,0 +1,10 @@ +import os + +def __activate_virtual_env__(): + __path__ = os.getenv('VIRTUAL_ENV') + if __path__ is None: + return + __script__ = os.path.join(__path__, 'bin', 'activate_this.py') + execfile(__script__, {'__file__': __script__}) + +__activate_virtual_env__() diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index 34f5dd41b7e68..53409b746d811 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -208,7 +208,6 @@ def provider(input_types=None, should_shuffle=None, pool_size=-1, calc_batch_size=None, cache=CacheType.NO_CACHE, check=False, check_fail_continue=False, - use_dynamic_order=True, init_hook=None, **kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. @@ -228,9 +227,15 @@ def process(settings, file_name): The configuration of data provider should be setup by\: :param input_types: Specify the input types, can also be set in init_hook. - It is a list of InputType object. For example, input_types= \ - [dense_vector(9), integer_value(2)]. - :type input_types: list|tuple + It could be a list of InputType object. For example, + input_types=[dense_vector(9), integer_value(2)]. Or user + can set a dict of InputType object, which key is + data_layer's name. For example, input_types=\ + {'img': img_features, 'label': label}. when using dict of + InputType, user could yield a dict of feature values, which + key is also data_layer's name. + + :type input_types: list|tuple|dict :param should_shuffle: True if data should shuffle. Pass None means shuffle when is training and not to shuffle when is testing. @@ -281,12 +286,6 @@ def process(settings, file_name): drop the wrong format data when it is True. Has no effect when check set to False. :type check_fail_continue: bool - - :param use_dynamic_order: Allow provider to yield a dictionary object, whose - key is a input data layer name, and value is the - feature value. The tuples are still allowed when - use_dynmaic_order is True. 
- :type use_dynamic_order: bool """ def __wrapper__(generator): @@ -340,6 +339,11 @@ def __init__(self, file_list, **kwargs): assert self.slots is not None assert self.generator is not None + use_dynamic_order = False + if isinstance(self.slots, dict): # reorder input_types + self.slots = [self.slots[ipt] for ipt in self.input_order] + use_dynamic_order = True + if len(self.slots) == 1: self.generator = SingleSlotWrapper(self.generator) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 18f0b1b4e497e..c1e74c7a2d8f7 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -216,6 +216,10 @@ def Inputs(*args): if g_current_submodel is g_root_submodel: g_config.model_config.input_layer_names.append(name) +@config_func +def HasInputsSet(): + return len(g_config.model_config.input_layer_names) != 0 + # Define the name of the output layers of the NeuralNetwork. # Usually the output is simply the cost layer. diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index c54ec3096989c..d8f96195020b4 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -30,7 +30,7 @@ 'lstmemory_unit', 'small_vgg', 'img_conv_group', 'vgg_16_network', 'gru_unit', 'gru_group', 'simple_gru', 'simple_attention', 'text_conv_pool', - 'bidirectional_lstm', 'outputs'] + 'bidirectional_lstm', 'inputs', 'outputs'] ###################################################### @@ -372,8 +372,8 @@ def __vgg__(ipt, num_filter, times, dropouts, num_channels_=None): tmp = __vgg__(tmp, 128, 2, [0.4, 0]) tmp = __vgg__(tmp, 256, 3, [0.4, 0.4, 0]) tmp = __vgg__(tmp, 512, 3, [0.4, 0.4, 0]) - tmp = img_pool_layer(input = tmp, stride = 2, - pool_size = 2, pool_type = MaxPooling()) + tmp = img_pool_layer(input=tmp, stride=2, + pool_size=2, pool_type=MaxPooling()) tmp = dropout_layer(input=tmp, dropout_rate=0.5) tmp = fc_layer(input=tmp, size=512, layer_attr=ExtraAttr(drop_rate=0.5), act=LinearActivation()) @@ -745,7 +745,6 @@ def gru_group(input, gru_bias_attr=None, act=None, gate_act=None, gru_layer_attr=None): - """ gru_group is a recurrent layer group version Gated Recurrent Unit. It does exactly the same calculation as the grumemory layer does. A promising @@ -919,12 +918,12 @@ def bidirectional_lstm(input, size, name=None, return_seq=False, fw = simple_lstm(name='%s_fw' % name, input=input, size=size, **dict((k[len('fwd_'):], v) for k, v in args.iteritems() - if k.startswith('fwd_'))) + if k.startswith('fwd_'))) bw = simple_lstm(name="%s_bw" % name, input=input, size=size, reverse=True, **dict((k[len('bwd_'):], v) for k, v in args.iteritems() - if k.startswith('bwd_'))) + if k.startswith('bwd_'))) if return_seq: return concat_layer(name=name, input=[fw, bw], layer_attr=concat_attr, @@ -1052,14 +1051,30 @@ def dropout_layer(input, dropout_rate, name=None): layer_attr=ExtraAttr(drop_rate=dropout_rate)) -def outputs(layers, *args): +def inputs(layers, *args): + """ + Declare the inputs of network. The order of input should be as same as + the data provider's return order. + + :param layers: Input Layers. + :type layers: list|tuple|LayerOutput. + :return: """ - Declare the end of network. Currently it will only calculate the - input/output order of network. It will calculate the predict network or - train network's output automatically. 
+ if isinstance(layers, LayerOutput) or isinstance(layers, basestring): + layers = [layers] + if len(args) != 0: + layers.extend(args) - :param layers: + Inputs(*[l.name for l in layers]) + + +def outputs(layers, *args): + """ + Declare the outputs of network. If user have not defined the inputs of + network, this method will calculate the input order by dfs travel. + + :param layers: Output layers. :type layers: list|tuple|LayerOutput :return: """ @@ -1093,6 +1108,11 @@ def __dfs_travel__(layer, layers.extend(args) assert len(layers) > 0 + + if HasInputsSet(): # input already set + Outputs(*[l.name for l in layers]) + return # just return outputs. + if len(layers) != 1: logger.warning("`outputs` routine try to calculate network's" " inputs and outputs order. It might not work well." From b22e50ede3e3803e34ccea29ab096a8ad664c129 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Mon, 17 Oct 2016 15:17:35 +0800 Subject: [PATCH 030/180] Update docker_instll.rst docker image name (#210) --- doc_cn/build_and_install/install/docker_install.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc_cn/build_and_install/install/docker_install.rst b/doc_cn/build_and_install/install/docker_install.rst index 44aa2a0983f4f..a5f5fb117e11e 100644 --- a/doc_cn/build_and_install/install/docker_install.rst +++ b/doc_cn/build_and_install/install/docker_install.rst @@ -23,9 +23,9 @@ PaddlePaddle提供的Docker镜像版本 +-----------------+------------------+------------------------+-----------------------+ | GPU | gpu-latest | gpu-devel-latest | gpu-demo-latest | +-----------------+------------------+------------------------+-----------------------+ -| CPU WITHOUT AVX | cpu-noavx-latest | cpu-devel-noavx-latest | cpu-demo-noavx-latest | +| CPU WITHOUT AVX | cpu-noavx-latest | cpu-noavx-devel-latest | cpu-noavx-demo-latest | +-----------------+------------------+------------------------+-----------------------+ -| GPU WITHOUT AVX | gpu-noavx-latest | gpu-devel-noavx-latest | gpu-demo-noavx-latest | +| GPU WITHOUT AVX | gpu-noavx-latest | gpu-noavx-devel-latest | gpu-noavx-demo-latest | +-----------------+------------------+------------------------+-----------------------+ 其中,横向包括三个版本,normal,devel和demo。 From 28bc05b126b0c1fb5da4825e10c67e13609e1fde Mon Sep 17 00:00:00 2001 From: emailweixu Date: Mon, 17 Oct 2016 17:56:21 +0800 Subject: [PATCH 031/180] Fix sparse training for trainer_count=1 (#204) * Fix sparse training for trainer_count=1 For trainer_count=1, the gradient machine is NeuralNetwork, which does not create parameter buf for PARAMETER_GRADIENT for sparse update in Parameter::enableType. But gradient parameter buf is still used in SgdThreadUpdater. 
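
The fix is to create that buffer explicitly when the updater is initialized. A
condensed sketch of the change applied in SgdThreadUpdater::init below, using the
enableBufType helper this patch adds to Parameter:

    if (para->isGradSparseUpdate() && FLAGS_trainer_count == 1) {
      // NeuralNetwork did not allocate this buffer for sparse-update
      // parameters; create it here so the single-threaded updater can
      // accumulate sparse gradients into it.
      para->enableBufType(PARAMETER_GRADIENT);
    }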
* Minor update to comment --- paddle/gserver/evaluators/ChunkEvaluator.cpp | 2 +- .../gradientmachines/MultiGradientMachine.cpp | 1 - paddle/parameter/Parameter.h | 6 ++++++ paddle/trainer/ThreadParameterUpdater.cpp | 16 +++++++++++++++- paddle/utils/Logging.h | 2 +- 5 files changed, 23 insertions(+), 4 deletions(-) diff --git a/paddle/gserver/evaluators/ChunkEvaluator.cpp b/paddle/gserver/evaluators/ChunkEvaluator.cpp index 273925ba55ee4..22579891f397a 100644 --- a/paddle/gserver/evaluators/ChunkEvaluator.cpp +++ b/paddle/gserver/evaluators/ChunkEvaluator.cpp @@ -75,7 +75,6 @@ class ChunkEvaluator : public Evaluator { public: virtual void init(const EvaluatorConfig& config) { - CHECK(!FLAGS_use_gpu) << "Not supported"; Evaluator::init(config); if (config.chunk_scheme() == "IOB") { numTagTypes_ = 2; @@ -137,6 +136,7 @@ class ChunkEvaluator : public Evaluator { CHECK_EQ(arguments.size(), (size_t)2); IVectorPtr& output = arguments[0].ids; IVectorPtr& label = arguments[1].ids; + CHECK(!output->useGpu() && !label->useGpu()) << "Not supported"; auto sequenceStartPositions = arguments[1].sequenceStartPositions->getVector(false); CHECK_EQ(output->getSize(), label->getSize()); diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp index 787ce703a08ae..0ded30eeb44e9 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/gserver/gradientmachines/MultiGradientMachine.cpp @@ -813,7 +813,6 @@ void TrainerThread::mergeGradSparse( para->getMat(PARAMETER_GRADIENT).get()); std::vector& ids = mainMat->getIds(threadId_); - ids.clear(); for (auto slaveParams : slaveParameters) { SparseRowCpuMatrix* mat = dynamic_cast((*slaveParams)[pid] diff --git a/paddle/parameter/Parameter.h b/paddle/parameter/Parameter.h index 2f9606dc68026..ff251fe89f9f8 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/parameter/Parameter.h @@ -146,6 +146,12 @@ class Parameter { } } + void enableBufType(ParameterType type) { + if (bufs_[type]) return; + bufs_[type] = Vector::createParallelVector(config_.size(), useGpu_); + bufs_[type]->zeroMem(); + } + void enableIntType(ParameterType type, size_t intStoreSize = 0) { if (!intBufs_[type]) { SetDevice device(deviceId_); diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/trainer/ThreadParameterUpdater.cpp index 91f7f4d29df93..a26e9239f987f 100644 --- a/paddle/trainer/ThreadParameterUpdater.cpp +++ b/paddle/trainer/ThreadParameterUpdater.cpp @@ -20,6 +20,8 @@ limitations under the License. */ #include "paddle/math/SparseRowMatrix.h" #include "paddle/utils/Thread.h" +P_DECLARE_int32(trainer_count); + namespace paddle { SgdThreadUpdater::SgdThreadUpdater(const OptimizationConfig& optConfig) @@ -48,6 +50,13 @@ void SgdThreadUpdater::init(std::vector& parameters) { false /*inPserver*/)); size_t numRows = para->isGradSparseUpdate() ? para->getConfig().dims(0) : 0; optimizers_[pid]->init(numRows, ¶->getConfig()); + if (para->isGradSparseUpdate() && FLAGS_trainer_count == 1) { + // For trainer_count=1, the gradient machine is NeuralNetwork, which does + // not create parameter buf for PARAMETER_GRADIENT for sparse update in + // Parameter::enableType(). But gradient parameter buf is still used + // in SgdThreadUpdater. We need to explicitly create it. 
+ para->enableBufType(PARAMETER_GRADIENT); + } } } @@ -211,7 +220,7 @@ void SgdThreadUpdater::threadUpdateSparse( // From MultiGradientMachine SparseRowIdsCpuMatrix* mainMat = dynamic_cast( para->getMat(PARAMETER_GRADIENT).get()); - const std::vector& sparseIds = mainMat->getIds(tid); + std::vector& sparseIds = mainMat->getIds(tid); for (auto id : sparseIds) { // setup sub bufs @@ -221,6 +230,7 @@ void SgdThreadUpdater::threadUpdateSparse( optimizer->update(vecs, para->getConfig(), id); vecs[PARAMETER_GRADIENT]->zeroMem(); } + sparseIds.clear(); } else if (dynamic_cast( para->getMat(PARAMETER_GRADIENT).get())) { // From NeuralNetwork @@ -246,6 +256,10 @@ void SgdThreadUpdater::threadUpdateSparse( optimizer->update(vecs, para->getConfig(), id); vecs[PARAMETER_GRADIENT]->zeroMem(); } + // For numThreads > 1, MultiGradientMachine is used, which goes + // to the above branch. + CHECK_EQ(numThreads, 1); + mainMat->clearIndices(); } else { auto & m = *para->getMat(PARAMETER_GRADIENT).get(); LOG(FATAL) << "Internal error: " << para->getName() << " " diff --git a/paddle/utils/Logging.h b/paddle/utils/Logging.h index b3f439804686f..7fdfa3240c1de 100644 --- a/paddle/utils/Logging.h +++ b/paddle/utils/Logging.h @@ -191,7 +191,7 @@ void installFailureWriter(void(*callback)(const char*, int)); } #endif // PADDLE_USE_GLOG -#ifdef NDEBUG +#ifndef NDEBUG #define DEBUG_LEVEL 5 #define DBG VLOG(DEBUG_LEVEL) #else From 45280a07dab539ca62a2f392786b95c797c0aa88 Mon Sep 17 00:00:00 2001 From: Zrachel Date: Tue, 18 Oct 2016 10:35:24 +0800 Subject: [PATCH 032/180] Supplement doc for RNN (#214) --- doc_cn/algorithm/rnn/hierarchical-rnn.md | 138 ++++++++++++++++++++++- 1 file changed, 137 insertions(+), 1 deletion(-) diff --git a/doc_cn/algorithm/rnn/hierarchical-rnn.md b/doc_cn/algorithm/rnn/hierarchical-rnn.md index 979fe13e2ecbd..4a85cf336146e 100644 --- a/doc_cn/algorithm/rnn/hierarchical-rnn.md +++ b/doc_cn/algorithm/rnn/hierarchical-rnn.md @@ -260,7 +260,143 @@ out = recurrent_group(step=outer_step, input=SubsequenceInput(emb)) ## 示例3:双进双出,输入不等长 -TBD +**输入不等长**是指recurrent_group的多个输入在各时刻的长度可以不相等, 但需要指定一个和输出长度一致的input,用targetInlink表示。参考配置:单层RNN(`sequence_rnn_multi_unequalength_inputs.conf`),双层RNN(`sequence_nest_rnn_multi_unequalength_inputs.conf`) + +### 读取双层序列的方法 + +我们看一下单双层序列的数据组织形式和dataprovider(见`rnn_data_provider.py`) +```python +data2 = [ + [[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]] ,0], + [[[0, 2], [2, 5], [0, 1, 2]],[[1, 5], [4], [2, 3, 6, 1]], 1], +] + +@provider(input_types=[integer_value_sub_sequence(10), + integer_value_sub_sequence(10), + integer_value(2)], + should_shuffle=False) +def process_unequalength_subseq(settings, file_name): #双层RNN的dataprovider + for d in data2: + yield d + + +@provider(input_types=[integer_value_sequence(10), + integer_value_sequence(10), + integer_value(2)], + should_shuffle=False) +def process_unequalength_seq(settings, file_name): #单层RNN的dataprovider + for d in data2: + words1=reduce(lambda x,y: x+y, d[0]) + words2=reduce(lambda x,y: x+y, d[1]) + yield words1, words2, d[2] +``` + +data2 中有两个样本,每个样本有两个特征, 记fea1, fea2。 + +- 单层序列:两个样本分别为[[1, 2, 4, 5, 2], [5, 4, 1, 3, 1]] 和 [[0, 2, 2, 5, 0, 1, 2], [1, 5, 4, 2, 3, 6, 1]] +- 双层序列:两个样本分别为 + - **样本1**:[[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]]]。fea1和fea2都分别有2个子句,fea1=[[1, 2], [4, 5, 2]], fea2=[[5, 4, 1], [3, 1]] + - **样本2**:[[[0, 2], [2, 5], [0, 1, 2]],[[1, 5], [4], [2, 3, 6, 1]]]。fea1和fea2都分别有3个子句, fea1=[[0, 2], [2, 5], [0, 1, 2]], fea2=[[1, 5], [4], [2, 3, 6, 1]]。
+ - **注意**:每个样本中,各特征的子句数目需要相等。这里说的“双进双出,输入不等长”是指fea1在i时刻的输入的长度可以不等于fea2在i时刻的输入的长度。如对于第1个样本,时刻i=2, fea1[2]=[4, 5, 2],fea2[2]=[3, 1],3≠2。 +- 单双层序列中,两个样本的label都分别是0和1 + +### 模型中的配置 + +单层RNN(`sequence_rnn_multi_unequalength_inputs.conf`)和双层RNN(`sequence_nest_rnn_multi_unequalength_inputs.conf`)两个模型配置达到的效果完全一样,区别只在于输入为单层还是双层序列,现在我们来看它们内部分别是如何实现的。 + +- 单层序列: + - 过了一个简单的recurrent_group。每一个时间步,当前的输入y和上一个时间步的输出rnn_state做了一个全连接,功能与示例2中`sequence_rnn.conf`的`step`函数完全相同。这里,两个输入x1,x2分别通过calrnn返回最后时刻的状态。结果得到的encoder1_rep和encoder2_rep分别是单层序列,最后取encoder1_rep的最后一个时刻和encoder2_rep的所有时刻分别相加得到context。 + - 注意到这里recurrent_group输入的每个样本中,fea1和fea2的长度都分别相等,这并非偶然,而是因为recurrent_group要求输入为单层序列时,所有输入的长度都必须相等。 + +```python +def step(x1, x2): + def calrnn(y): + mem = memory(name = 'rnn_state_' + y.name, size = hidden_dim) + out = fc_layer(input = [y, mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'rnn_state_' + y.name) + return out + + encoder1 = calrnn(x1) + encoder2 = calrnn(x2) + return [encoder1, encoder2] + +encoder1_rep, encoder2_rep = recurrent_group( + name="stepout", + step=step, + input=[emb1, emb2]) + +encoder1_last = last_seq(input = encoder1_rep) +encoder1_expandlast = expand_layer(input = encoder1_last, + expand_as = encoder2_rep) +context = mixed_layer(input = [identity_projection(encoder1_expandlast), + identity_projection(encoder2_rep)], + size = hidden_dim) +``` +- 双层序列: + - 双层RNN中,对输入的两个特征分别求时序上的连续全连接(`inner_step1`和`inner_step2`分别处理fea1和fea2),其功能与示例2中`sequence_nest_rnn.conf`的`outer_step`函数完全相同。不同之处是,此时输入`[SubsequenceInput(emb1), SubsequenceInput(emb2)]`在各时刻并不等长。 + - 函数`outer_step`中可以分别处理这两个特征,但我们需要用targetInlink指定recurrent_group的输出的格式(各子句长度)只能和其中一个保持一致,如这里选择了和emb2的长度一致。 + - 最后,依然是取encoder1_rep的最后一个时刻和encoder2_rep的所有时刻分别相加得到context。 + +```python +def outer_step(x1, x2): + outer_mem1 = memory(name = "outer_rnn_state1", size = hidden_dim) + outer_mem2 = memory(name = "outer_rnn_state2", size = hidden_dim) + def inner_step1(y): + inner_mem = memory(name = 'inner_rnn_state_' + y.name, + size = hidden_dim, + boot_layer = outer_mem1) + out = fc_layer(input = [y, inner_mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'inner_rnn_state_' + y.name) + return out + + def inner_step2(y): + inner_mem = memory(name = 'inner_rnn_state_' + y.name, + size = hidden_dim, + boot_layer = outer_mem2) + out = fc_layer(input = [y, inner_mem], + size = hidden_dim, + act = TanhActivation(), + bias_attr = True, + name = 'inner_rnn_state_' + y.name) + return out + + encoder1 = recurrent_group( + step = inner_step1, + name = 'inner1', + input = x1) + + encoder2 = recurrent_group( + step = inner_step2, + name = 'inner2', + input = x2) + + sentence_last_state1 = last_seq(input = encoder1, name = 'outer_rnn_state1') + sentence_last_state2_ = last_seq(input = encoder2, name = 'outer_rnn_state2') + + encoder1_expand = expand_layer(input = sentence_last_state1, + expand_as = encoder2) + + return [encoder1_expand, encoder2] + +encoder1_rep, encoder2_rep = recurrent_group( + name="outer", + step=outer_step, + input=[SubsequenceInput(emb1), SubsequenceInput(emb2)], + targetInlink=emb2) + +encoder1_last = last_seq(input = encoder1_rep) +encoder1_expandlast = expand_layer(input = encoder1_last, + expand_as = encoder2_rep) +context = mixed_layer(input = [identity_projection(encoder1_expandlast), + identity_projection(encoder2_rep)], + size = hidden_dim) +``` ## 示例4:beam_search的生成 From 58f896c3f40602116f68f6bfc58c96228d0f48bd Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 18 
Oct 2016 03:12:03 +0000 Subject: [PATCH 033/180] Speed up PyDP2, support numpy.float array (#207) --- cmake/flags.cmake | 4 ++- demo/mnist/data/get_mnist_data.sh | 2 +- paddle/gserver/dataproviders/DataProvider.cpp | 8 +++-- paddle/gserver/dataproviders/DataProvider.h | 5 +-- .../gserver/dataproviders/PyDataProvider2.cpp | 36 +++++++++++++++---- paddle/utils/Queue.h | 15 ++++++++ .../trainer_config_helpers/data_sources.py | 1 + 7 files changed, 59 insertions(+), 12 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index cc59309ee7efa..dbad6be3f41b3 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -64,7 +64,9 @@ set(COMMON_FLAGS -Wdelete-non-virtual-dtor -Wno-unused-parameter -Wno-error=literal-suffix - -Wno-error=unused-local-typedefs) + -Wno-error=unused-local-typedefs + -Wno-error=unused-function # Warnings in Numpy Header. +) foreach(flag ${COMMON_FLAGS}) safe_set_cflag(CMAKE_C_FLAGS ${flag}) diff --git a/demo/mnist/data/get_mnist_data.sh b/demo/mnist/data/get_mnist_data.sh index c3ef99445049d..9099b5ab6fb85 100755 --- a/demo/mnist/data/get_mnist_data.sh +++ b/demo/mnist/data/get_mnist_data.sh @@ -1,6 +1,6 @@ #!/usr/bin/env sh # This scripts downloads the mnist data and unzips it. - +set -e DIR="$( cd "$(dirname "$0")" ; pwd -P )" rm -rf "$DIR/raw_data" mkdir "$DIR/raw_data" diff --git a/paddle/gserver/dataproviders/DataProvider.cpp b/paddle/gserver/dataproviders/DataProvider.cpp index c3b4769f7612b..8cefbb30ada46 100644 --- a/paddle/gserver/dataproviders/DataProvider.cpp +++ b/paddle/gserver/dataproviders/DataProvider.cpp @@ -57,7 +57,8 @@ void BufferBatch::clone(DataBatch* srcBatch, bool useGpu) { } } -DoubleBuffer::DoubleBuffer(DataProvider* dataPool, bool useGpu, +DoubleBuffer::DoubleBuffer(DataProvider *dataPool, + bool useGpu, int64_t batchSize) { batchSize_ = batchSize; dataPool_ = dataPool; @@ -110,6 +111,9 @@ void DoubleBuffer::removeOneBatch(DataBatch* dataBatch) { } void DoubleBuffer::insertOneBatch(DataBatch* batch) { + while (!bufferQueue_->waitNotEmptyFor(2 /* seconds */)) { // time out + if (stopping_) return; + } BufferBatch* bufBatch = bufferQueue_->dequeue(); // clone and copy the data from an Threadlocal Variable bufBatch->clone(batch, useGpu_); @@ -138,7 +142,7 @@ void DoubleBuffer::asyncLoadBatch() { actualSize = dataPool_->getNextBatchInternal(batchSize_, &newBatch); } insertOneBatch(&newBatch); - } while (actualSize > 0); + } while (actualSize > 0 && !stopping_); } } diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/gserver/dataproviders/DataProvider.h index c24546374abf3..112e45de1cb23 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/gserver/dataproviders/DataProvider.h @@ -259,7 +259,9 @@ typedef Queue BufferBatchQueue; class DoubleBuffer { public: - DoubleBuffer(DataProvider* dataPool, bool useGpu, int64_t batchSize = 0); + DoubleBuffer(DataProvider* dataPool, + bool useGpu, + int64_t batchSize = 0); virtual ~DoubleBuffer(); void removeOneBatch(DataBatch* dataBatch); @@ -349,7 +351,6 @@ class DataProvider { */ virtual void reset() { if (doubleBuffer_ != nullptr) { - LOG(INFO) << "the double-buffer is starting ..."; doubleBuffer_->startAsyncLoad(); } } diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp index e3e472ac166c2..c464d01fdefd1 100644 --- a/paddle/gserver/dataproviders/PyDataProvider2.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp @@ -18,9 +18,16 @@ limitations under the License. 
*/ #include #include #include +#include +#include +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#include #include "DataProvider.h" + #include "paddle/utils/PythonUtil.h" +#include "paddle/utils/Locks.h" +#include "paddle/utils/Stat.h" namespace paddle { @@ -202,7 +209,10 @@ class PyDataProvider2 : public DataProvider { PyDataProvider2(const DataConfig& config, const ModelConfig& modelConfig, bool useGpu) - :DataProvider(config, useGpu), callingContextCreated_(2) { + :DataProvider(config, useGpu), + callingContextCreated_(2) { + if (PyArray_API == NULL) + import_array(); auto& args = config.load_data_args(); PyObjectPtr kwargs = PyObjectPtr(PyDict_New()); if (!args.empty()) { @@ -454,6 +464,7 @@ class PyDataProvider2 : public DataProvider { std::condition_variable pushCV_; std::condition_variable pullCV_; std::mutex mtx_; + ThreadBarrier callingContextCreated_; std::unique_ptr cache_; @@ -496,8 +507,8 @@ class PyDataProvider2 : public DataProvider { * Resetting the PyDataProvider. May start reading thread here. */ virtual void reset() { - DataProvider::reset(); resetImpl(true); + DataProvider::reset(); } /** @@ -518,6 +529,7 @@ class PyDataProvider2 : public DataProvider { * Loading a batch of data. */ int64_t getNextBatchInternal(int64_t size_, DataBatch *batch) { + REGISTER_TIMER("PyDP2.getNextBatchInternal") CHECK_GE(size_, 0); size_t size = (size_t) size_; if (loadThread_) { // loading from thread should wait for data pool ready. @@ -698,10 +710,22 @@ class DenseScanner: public IFieldScanner { */ virtual void fill(Argument &argument, PyObject *obj) { real* dat = argument.value->getData() + height_ * headerPtr_->dim; - py::SequenceHelper s(obj); - // TODO(yuyang18): Here we can use AVX or SSE to accelerate memory copy. - for (size_t i=0; i < headerPtr_->dim; ++i) { - dat[i] = (real) s.getDouble(i); + if (PyArray_Check(obj)) { + auto dtype = PyArray_DTYPE((PyArrayObject*)obj); + if (dtype->type == 'f' && dtype->elsize == sizeof(real)) { + real * data = (real*)PyArray_DATA((PyArrayObject*)obj); + auto sz = PyArray_SIZE((PyArrayObject*)obj); + std::copy(data, data + sz, dat); + } else { + LOG(FATAL) << "You should yield float" << sizeof(real) * 8 + << " array"; + } + } else { + py::SequenceHelper s(obj); + // TODO(yuyang18): Here we can use AVX or SSE to accelerate memory copy. + for (size_t i=0; i < headerPtr_->dim; ++i) { + dat[i] = (real) s.getDouble(i); + } } ++height_; } diff --git a/paddle/utils/Queue.h b/paddle/utils/Queue.h index d73f27d7fafd6..f952cf58778de 100644 --- a/paddle/utils/Queue.h +++ b/paddle/utils/Queue.h @@ -135,6 +135,21 @@ class Queue { queueCV_.wait(lock, [this]() { return numElements_ == 0; }); } + /** + * @brief wait queue is not empty at most for some seconds. + * @param seconds wait time limit. + * @return true if queue is not empty. false if timeout. 
+ */ + bool waitNotEmptyFor(int seconds) { + std::unique_lock lock(queueLock_); + return queueCV_.wait_for( + lock, + std::chrono::seconds(seconds), + [this] { + return numElements_ != 0; + }); + } + private: std::deque elements_; int numElements_; diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py index 8ada3903dc06b..3b5c17a271f02 100644 --- a/python/paddle/trainer_config_helpers/data_sources.py +++ b/python/paddle/trainer_config_helpers/data_sources.py @@ -84,6 +84,7 @@ def py_data2(files, load_data_module, load_data_object, load_data_args, data.load_data_module = load_data_module data.load_data_object = load_data_object data.load_data_args = load_data_args + data.async_load_data = True return data data_cls = py_data2 From 6bef8390b92b4931b6ef0ded73f73a389ff1bef6 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Tue, 18 Oct 2016 14:53:57 +0800 Subject: [PATCH 034/180] fix bug in some different python environment (#220) --- paddle/gserver/dataproviders/PyDataProvider2.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp index c464d01fdefd1..ca8b07af49ca0 100644 --- a/paddle/gserver/dataproviders/PyDataProvider2.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp @@ -14,11 +14,11 @@ limitations under the License. */ #ifndef PADDLE_NO_PYTHON +#include #include #include #include #include -#include #include #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include From 76fb74dc2e542bcaffb726bb137ff112b213f82c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 19 Oct 2016 15:13:31 +0800 Subject: [PATCH 035/180] Fix install_docker.rst and data_sources file open mode * Follow #223 * Fix #222 --- doc/build/docker_install.rst | 2 +- python/paddle/trainer_config_helpers/data_sources.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/docker_install.rst b/doc/build/docker_install.rst index 542b9bac27afb..e95de35f4da35 100644 --- a/doc/build/docker_install.rst +++ b/doc/build/docker_install.rst @@ -69,7 +69,7 @@ If you want to launch container with GPU support, you need to set some environme .. 
code-block:: bash - export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}" + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:gpu-latest diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py index 3b5c17a271f02..f51140656d0dc 100644 --- a/python/paddle/trainer_config_helpers/data_sources.py +++ b/python/paddle/trainer_config_helpers/data_sources.py @@ -68,7 +68,7 @@ def define_py_data_source(file_list, cls, module, file_list_name = 'train.list' if isinstance(cls, TestData): file_list_name = 'test.list' - with open(file_list_name, 'r') as f: + with open(file_list_name, 'w') as f: f.writelines(file_list) file_list = file_list_name From e1f57bfd66245f78d04f47d670cba5592d5734b2 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Wed, 19 Oct 2016 15:19:32 +0800 Subject: [PATCH 036/180] add base class for seqlastin/max/average layer (#187) --- paddle/gserver/layers/AverageLayer.cpp | 74 ++-------------- paddle/gserver/layers/AverageLayer.h | 19 ++--- paddle/gserver/layers/MaxLayer.cpp | 79 ++--------------- paddle/gserver/layers/MaxLayer.h | 19 +++-- .../layers/SequenceLastInstanceLayer.cpp | 82 +++--------------- paddle/gserver/layers/SequencePoolLayer.cpp | 84 +++++++++++++++++++ paddle/gserver/layers/SequencePoolLayer.h | 57 +++++++++++++ 7 files changed, 188 insertions(+), 226 deletions(-) create mode 100644 paddle/gserver/layers/SequencePoolLayer.cpp create mode 100644 paddle/gserver/layers/SequencePoolLayer.h diff --git a/paddle/gserver/layers/AverageLayer.cpp b/paddle/gserver/layers/AverageLayer.cpp index 6e52217de4e63..7401cdc9a516b 100644 --- a/paddle/gserver/layers/AverageLayer.cpp +++ b/paddle/gserver/layers/AverageLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "AverageLayer.h" #include "paddle/utils/Logging.h" @@ -25,13 +24,8 @@ REGISTER_LAYER(average, AverageLayer); bool AverageLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { - /* Initialize the basic parent class */ - Layer::init(layerMap, parameterMap); + SequencePoolLayer::init(layerMap, parameterMap); - /* initialize biases_ */ - if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); - } dataMtx_ = Matrix::create(nullptr, 1, 1, false, useGpu_); outMtx_ = Matrix::create(nullptr, 1, getSize(), false, useGpu_); // average strategy @@ -44,57 +38,15 @@ bool AverageLayer::init(const LayerMap& layerMap, } else { LOG(FATAL) << "Unknown average strategy: " << config_.average_strategy(); } - // transform to which sequence type - if (config_.trans_type() == "non-seq") { - type_ = kNonSeq; - } else if (config_.trans_type() == "seq") { - type_ = kSeq; - } else { - LOG(FATAL) << "Unknown trans_type: " << config_.trans_type(); - } - setNeedSequenceInfo(false); return true; } void AverageLayer::forward(PassType passType) { - Layer::forward(passType); - - // average layer should have exactly 1 input - CHECK_EQ(1U, inputLayers_.size()); + SequencePoolLayer::forward(passType); - size_t dim = getSize(); - const Argument& input = getInput(0); - CHECK(input.sequenceStartPositions); - if (type_) { - CHECK(input.subSequenceStartPositions) - << "when trans_type = seq, input must hasSubseq"; - } - int64_t newBatchSize = - type_ ? input.getNumSubSequences() : input.getNumSequences(); - ICpuGpuVectorPtr startPositions = - type_ ? input.subSequenceStartPositions - : input.sequenceStartPositions; - const int* starts = startPositions->getData(false); - size_t numSequences = startPositions->getSize() - 1; - - // check - CHECK_EQ(numSequences, (size_t)newBatchSize); - CHECK_EQ(starts[numSequences], input.getBatchSize()); - CHECK_EQ(dim, input.value->getWidth()); - - resetOutput(newBatchSize, dim); - auto startsPos = startPositions->getVector(useGpu_); MatrixPtr inputValue = getInputValue(0); - getOutputValue()->sequenceAvgForward(*inputValue, *startsPos, mode_); - - /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq, - * thus, in this case, output_ has no sequenceStartPositions. - * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this - * case, we should compute the new sequenceStartPositions. - */ - if (type_) { - output_.degradeSequence(input, useGpu_); - } + getOutputValue()->sequenceAvgForward( + *inputValue, *startPositions_->getVector(useGpu_), mode_); /* add the bias-vector AFTER average operation */ if (biases_.get() != NULL) { @@ -106,26 +58,16 @@ void AverageLayer::forward(PassType passType) { } void AverageLayer::backward(const UpdateCallback& callback) { - const Argument& input = getInput(0); - ICpuGpuVectorPtr startPositions = - type_ ? 
input.subSequenceStartPositions - : input.sequenceStartPositions; - const int* starts = startPositions->getData(false); - /* Do derivation */ { backwardActivation(); } - - if (biases_ && biases_->getWGrad()) { - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - - // Increasing the number of gradient - biases_->getParameterPtr()->incUpdate(callback); - } + SequencePoolLayer::backward(callback); + const int* starts = startPositions_->getData(false); MatrixPtr grad = getInputGrad(0); + if (grad) { size_t dim = getSize(); real* gradientData = getInputGrad(0)->getData(); real* gradient = getOutputGrad()->getData(); - size_t numSequences = startPositions->getSize() - 1; + size_t numSequences = startPositions_->getSize() - 1; for (size_t sequenceId = 0; sequenceId < numSequences; ++sequenceId) { // TODO(Dangqingqing) optimization for GPU int sequenceLength = starts[sequenceId + 1] - starts[sequenceId]; diff --git a/paddle/gserver/layers/AverageLayer.h b/paddle/gserver/layers/AverageLayer.h index ae910ddefad13..1edc2ace492c5 100644 --- a/paddle/gserver/layers/AverageLayer.h +++ b/paddle/gserver/layers/AverageLayer.h @@ -12,10 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once -#include "Layer.h" +#include "SequencePoolLayer.h" #include "paddle/math/Matrix.h" namespace paddle { @@ -23,20 +22,21 @@ namespace paddle { /** * A layer for "internal average" for sequence input. * Input: one or more sequences. Each sequence contains some instances. - * If AverageLevel = kNonSeq: + * If SequenceLevel = kNonSeq: * Output: output size is the number of input sequences (NOT input instances) * output[i] = average_{for each instance in this sequence}{input[i]} - * If AverageLevel = kSeq: + * If SequenceLevel = kSeq: * Check input sequence must has sub-sequence * Output: output size is the number of input sub-sequences * output[i] = average_{for each instance in this sub-sequence}{input[i]} + * + * The config file api is pooling_layer. */ - -class AverageLayer : public Layer { +class AverageLayer : public SequencePoolLayer { public: enum AverageStrategy { kAverage = 0, kSum = 1, kAverageSquareRootN = 2 }; - enum AverageLevel { kNonSeq = 0, kSeq = 1 }; - explicit AverageLayer(const LayerConfig& config) : Layer(config) {} + explicit AverageLayer(const LayerConfig& config) + : SequencePoolLayer(config) {} ~AverageLayer() {} @@ -46,11 +46,8 @@ class AverageLayer : public Layer { void backward(const UpdateCallback& callback = nullptr); protected: - std::unique_ptr biases_; MatrixPtr outMtx_; MatrixPtr dataMtx_; int mode_; - int type_; }; - } // namespace paddle diff --git a/paddle/gserver/layers/MaxLayer.cpp b/paddle/gserver/layers/MaxLayer.cpp index 226e0ea87dbd4..c4ffe894eccd6 100644 --- a/paddle/gserver/layers/MaxLayer.cpp +++ b/paddle/gserver/layers/MaxLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "MaxLayer.h" #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" @@ -21,55 +20,11 @@ namespace paddle { REGISTER_LAYER(max, MaxLayer); -bool MaxLayer::init(const LayerMap& layerMap, - const ParameterMap& parameterMap) { - /* Initialize the basic parent class */ - Layer::init(layerMap, parameterMap); - - /* initialize biases_ */ - if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); - } - - // transform to which sequence type - if (config_.trans_type() == "non-seq") { - type_ = kNonSeq; - } else if (config_.trans_type() == "seq") { - type_ = kSeq; - } else { - LOG(FATAL) << "Unknown trans_type: " << config_.trans_type(); - } - setNeedSequenceInfo(false); - return true; -} - void MaxLayer::forward(PassType passType) { - Layer::forward(passType); - // max layer should have exactly 1 input - CHECK_EQ(1U, inputLayers_.size()); - - size_t dim = getSize(); - const Argument& input = getInput(0); - int64_t newBatchSize = - type_ ? input.getNumSubSequences() : input.getNumSequences(); - ICpuGpuVectorPtr startPositions = - type_ ? input.subSequenceStartPositions - : input.sequenceStartPositions; - auto starts = startPositions->getVector(useGpu_); - size_t numSequences = startPositions->getSize() - 1; + SequencePoolLayer::forward(passType); - CHECK_EQ(dim, input.value->getWidth()); - CHECK_EQ(numSequences, (size_t)newBatchSize); - CHECK_EQ(startPositions->getData(false)[numSequences], input.getBatchSize()); - if (type_) { - // when trans_type = seq, input must hasSubseq - CHECK_EQ(input.hasSubseq(), 1UL); - } - - // reset output: resize to "num of sequences", not "batch size". - resetOutput(newBatchSize, dim); - - IVector::resizeOrCreate(maxIndex_, newBatchSize * dim, useGpu(deviceId_)); + IVector::resizeOrCreate(maxIndex_, newBatchSize_ * getSize(), + useGpu(deviceId_)); maxIndex_->zeroMem(); MatrixPtr inputValue = getInputValue(0); @@ -77,16 +32,8 @@ void MaxLayer::forward(PassType passType) { { REGISTER_TIMER_INFO("MaxLayerForward", getName().c_str()); - outputValue->maxSequenceForward(*inputValue, *starts, *maxIndex_); - } - - /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq, - * thus, in this case, output_ has no cpuSequenceStartPositions. - * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this - * case, we should compute the new cpuSequenceStartPositions. - */ - if (type_) { - output_.degradeSequence(input, useGpu_); + outputValue->maxSequenceForward( + *inputValue, *startPositions_->getVector(useGpu_), *maxIndex_); } if (config_.output_max_index()) { @@ -104,24 +51,14 @@ void MaxLayer::forward(PassType passType) { void MaxLayer::backward(const UpdateCallback& callback) { CHECK(!config_.output_max_index()) << "backward is not available when output_max_index is set"; - /* Do derivation */ { backwardActivation(); } - - if (biases_ && biases_->getWGrad()) { - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - - // Increasing the number of gradient - biases_->getParameterPtr()->incUpdate(callback); - } + SequencePoolLayer::backward(callback); MatrixPtr inputGrad = getInputGrad(0); MatrixPtr outputGrad = getOutputGrad(); if (inputGrad) { - ICpuGpuVectorPtr starts = - type_ ? 
getInput(0).subSequenceStartPositions - : getInput(0).sequenceStartPositions; REGISTER_TIMER_INFO("MaxLayerBackward", getName().c_str()); - inputGrad->maxSequenceBackward(*outputGrad, - *(starts->getVector(useGpu_)), *maxIndex_); + inputGrad->maxSequenceBackward( + *outputGrad, *(startPositions_->getVector(useGpu_)), *maxIndex_); } } diff --git a/paddle/gserver/layers/MaxLayer.h b/paddle/gserver/layers/MaxLayer.h index b4c34e665d926..e6dcfe9c6759d 100644 --- a/paddle/gserver/layers/MaxLayer.h +++ b/paddle/gserver/layers/MaxLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once -#include "Layer.h" +#include "SequencePoolLayer.h" #include "paddle/math/Matrix.h" #include "paddle/utils/ThreadLocal.h" @@ -24,29 +24,30 @@ namespace paddle { /** * A layer for "internal max" for sequence input. * Input: one or more sequences. Each sequence contains some instances. - * If MaxLevel = kNonSeq: + * If SequenceLevel = kNonSeq: * Output: output size is the number of input sequences (NOT input instances) * output[i] = max_{for each instance in this sequence}{input[i]} - * If MaxLevel = kSeq: + * If SequenceLevel = kSeq: * Check input sequence must has sub-sequence * Output: output size is the number of input sub-sequences * output[i] = max_{for each instance in this sub-sequence}{input[i]} + * + * The config file api is pooling_layer. */ -class MaxLayer : public Layer { +class MaxLayer : public SequencePoolLayer { protected: - std::unique_ptr biases_; // maxIndex_[i][j] = k : the value at (i, j) is from input[k]. IVectorPtr maxIndex_; - int type_; public: - explicit MaxLayer(const LayerConfig& config) : Layer(config) {} - enum MaxLevel {kNonSeq = 0, kSeq = 1 }; + explicit MaxLayer(const LayerConfig& config) : SequencePoolLayer(config) {} ~MaxLayer() {} - bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) { + return SequencePoolLayer::init(layerMap, parameterMap); + } void forward(PassType passType); void backward(const UpdateCallback& callback = nullptr); diff --git a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp index f4d26ba21bed6..26d9536dd57aa 100644 --- a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp +++ b/paddle/gserver/layers/SequenceLastInstanceLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "paddle/utils/Logging.h" -#include "Layer.h" +#include "SequencePoolLayer.h" #include "paddle/math/Matrix.h" #include "paddle/utils/Stat.h" @@ -29,20 +29,19 @@ namespace paddle { * If SequenceLevel = kSeq: * Check input sequence must has sub-sequence * Output: a sequence containing only the last instance of each sub-sequence - * of the input sequence + * of the input sequence + * + * The config file api is last_seq and first_seq. 
*/ -class SequenceLastInstanceLayer : public Layer { +class SequenceLastInstanceLayer : public SequencePoolLayer { protected: - std::unique_ptr biases_; MatrixPtr tmpSrc_; MatrixPtr tmpDest_; - enum SequenceLevel { kNonSeq = 0, kSeq = 1 }; - int type_; public: explicit SequenceLastInstanceLayer(const LayerConfig& config) - : Layer(config) {} + : SequencePoolLayer(config) {} ~SequenceLastInstanceLayer() {} @@ -56,56 +55,20 @@ REGISTER_LAYER(seqlastins, SequenceLastInstanceLayer); bool SequenceLastInstanceLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { - /* Initialize the basic parent class */ - Layer::init(layerMap, parameterMap); - - // seqlastins layer should have exactly 1 input - CHECK_EQ(1U, inputLayers_.size()); - - /* initialize biases_ */ - if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); - } + SequencePoolLayer::init(layerMap, parameterMap); tmpSrc_ = Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, useGpu_); tmpDest_ = Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, useGpu_); - // transform to which sequence type - if (config_.trans_type() == "non-seq") { - type_ = kNonSeq; - } else if (config_.trans_type() == "seq") { - type_ = kSeq; - } else { - LOG(FATAL) << "Unknown trans_type: " << config_.trans_type(); - } - setNeedSequenceInfo(false); return true; } void SequenceLastInstanceLayer::forward(PassType passType) { - Layer::forward(passType); - - size_t dim = getSize(); - const Argument& input = getInput(0); + SequencePoolLayer::forward(passType); - // check - CHECK(input.sequenceStartPositions); - if (type_) { - CHECK(input.subSequenceStartPositions) - << "when trans_type = seq, input must hasSubseq"; - } - auto startPositions = - type_ ? input.subSequenceStartPositions->getVector(false) - : input.sequenceStartPositions->getVector(false); - size_t height = type_ ? input.getNumSubSequences() : input.getNumSequences(); - CHECK_EQ(dim, input.value->getWidth()); - CHECK_EQ(startPositions->getData()[height], input.getBatchSize()); - CHECK_EQ(height, startPositions->getSize() - 1); - - reserveOutput(height, dim); - const int* starts = startPositions->getData(); + const int* starts = startPositions_->getData(false); MatrixPtr inputValue = getInputValue(0); MatrixPtr outputValue = getOutputValue(); @@ -113,21 +76,13 @@ void SequenceLastInstanceLayer::forward(PassType passType) { AsyncGpuBlock asyncGpuBlock; REGISTER_TIMER_INFO("SequenceLastInstanceLayerForward", getName().c_str()); - for (size_t seqId = 0; seqId < height; ++seqId) { + for (size_t seqId = 0; seqId < newBatchSize_; ++seqId) { int insId = config_.select_first() ? starts[seqId] : starts[seqId + 1] - 1; outputValue->subMatrix(seqId, 1, tmpDest_) ->assign(*(inputValue->subMatrix(insId, 1, tmpSrc_))); } - /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq, - * thus, in this case, output_ has no sequenceStartPositions. - * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this - * case, we should compute the new sequenceStartPositions. 
- */ - if (type_) { - output_.degradeSequence(input, useGpu_); - } } if (biases_.get() != NULL) { @@ -139,23 +94,12 @@ void SequenceLastInstanceLayer::forward(PassType passType) { } void SequenceLastInstanceLayer::backward(const UpdateCallback& callback) { - /* activation, should set to 'linear' in most cases */ - backwardActivation(); - - if (biases_ && biases_->getWGrad()) { - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); - - // Increasing the number of gradient - biases_->getParameterPtr()->incUpdate(callback); - } + SequencePoolLayer::backward(callback); MatrixPtr inputGrad = getInputGrad(0); MatrixPtr outputGrad = getOutputGrad(); - auto startPositions = - type_ ? getInput(0).subSequenceStartPositions->getVector(false) - : getInput(0).sequenceStartPositions->getVector(false); - const int* starts = startPositions->getData(); - size_t numSequences = startPositions->getSize() - 1; + const int* starts = startPositions_->getData(false); + size_t numSequences = startPositions_->getSize() - 1; if (inputGrad) { AsyncGpuBlock asyncGpuBlock; diff --git a/paddle/gserver/layers/SequencePoolLayer.cpp b/paddle/gserver/layers/SequencePoolLayer.cpp new file mode 100644 index 0000000000000..55be73d363df1 --- /dev/null +++ b/paddle/gserver/layers/SequencePoolLayer.cpp @@ -0,0 +1,84 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/utils/Logging.h" +#include "SequencePoolLayer.h" + +namespace paddle { + +bool SequencePoolLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + // seqlastins/max/average layer should have exactly 1 input + CHECK_EQ(1U, inputLayers_.size()); + + /* initialize biases_ */ + if (biasParameter_.get() != NULL) { + biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + } + // transform to which sequence type + if (config_.trans_type() == "non-seq") { + type_ = kNonSeq; + } else if (config_.trans_type() == "seq") { + type_ = kSeq; + } else { + LOG(FATAL) << "Unknown trans_type: " << config_.trans_type(); + } + setNeedSequenceInfo(false); + return true; +} + +void SequencePoolLayer::forward(PassType passType) { + Layer::forward(passType); + + const Argument& input = getInput(0); + newBatchSize_ = type_ ? input.getNumSubSequences() : input.getNumSequences(); + size_t dim = getSize(); + // check + CHECK_EQ(dim, input.value->getWidth()); + startPositions_ = + type_ ? input.subSequenceStartPositions : input.sequenceStartPositions; + auto starts = startPositions_->getVector(false); + CHECK_EQ(starts->getData()[newBatchSize_], input.getBatchSize()); + CHECK_EQ(newBatchSize_, starts->getSize() - 1); + + resetOutput(newBatchSize_, dim); + if (type_) { + CHECK(input.subSequenceStartPositions) + << "when trans_type = seq, input must hasSubseq"; + } + /* If type_ = kNonSeq, both seq has or not has sub-seq degrade to a non-seq, + * thus, in this case, output_ has no sequenceStartPositions. 
+ * If type_ = kSeq, seq has sub-seq degrades to a seq, thus, only in this + * case, we should compute the new sequenceStartPositions. + */ + if (type_) { + output_.degradeSequence(input, useGpu_); + } +} + +void SequencePoolLayer::backward(const UpdateCallback& callback) { + /* Do derivation */ { backwardActivation(); } + + if (biases_ && biases_->getWGrad()) { + biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + + // Increasing the number of gradient + biases_->getParameterPtr()->incUpdate(callback); + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/SequencePoolLayer.h b/paddle/gserver/layers/SequencePoolLayer.h new file mode 100644 index 0000000000000..669af80e1d447 --- /dev/null +++ b/paddle/gserver/layers/SequencePoolLayer.h @@ -0,0 +1,57 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Layer.h" +#include "paddle/math/Matrix.h" + +namespace paddle { +/** + * A base layer for SequenceLastInstanceLayer/AverageLayer/MaxLayer. + * + * Input: one or more sequences. Each sequence contains some instances. + * If SequenceLevel = kNonSeq: + * Output: output size is the number of input sequences (NOT input instances) + * output[i] = seqlastin/average/max_{for each instance in this + * sequence}{input[i]} + * If SequenceLevel = kSeq: + * Check input sequence must has sub-sequence + * Output: output size is the number of input sub-sequences + * output[i] = seqlastin/average/max_{for each instance in this + * sub-sequence}{input[i]} + * + * The config file api is pooling_layer. 
+ */ + +class SequencePoolLayer : public Layer { +protected: + int type_; + std::unique_ptr biases_; + enum SequenceLevel { kNonSeq = 0, kSeq = 1 }; + size_t newBatchSize_; + ICpuGpuVectorPtr startPositions_; + +public: + explicit SequencePoolLayer(const LayerConfig& config) : Layer(config) {} + + virtual ~SequencePoolLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); +}; + +} // namespace paddle From e387cdba77018805fcc3a0f5897e9747253798d7 Mon Sep 17 00:00:00 2001 From: alvations Date: Fri, 21 Oct 2016 11:28:46 +0800 Subject: [PATCH 037/180] Added Bidi-LSTM and DB-LSTM to quick_start demo (#226) --- demo/quick_start/train.sh | 2 + demo/quick_start/trainer_config.bidi-lstm.py | 62 +++++++++++++++++ demo/quick_start/trainer_config.db-lstm.py | 73 ++++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 demo/quick_start/trainer_config.bidi-lstm.py create mode 100644 demo/quick_start/trainer_config.db-lstm.py diff --git a/demo/quick_start/train.sh b/demo/quick_start/train.sh index 1f0a137c8bd59..ea4e32249a3d0 100755 --- a/demo/quick_start/train.sh +++ b/demo/quick_start/train.sh @@ -18,6 +18,8 @@ cfg=trainer_config.lr.py #cfg=trainer_config.emb.py #cfg=trainer_config.cnn.py #cfg=trainer_config.lstm.py +#cfg=trainer_config.bidi-lstm.py +#cfg=trainer_config.db-lstm.py paddle train \ --config=$cfg \ --save_dir=./output \ diff --git a/demo/quick_start/trainer_config.bidi-lstm.py b/demo/quick_start/trainer_config.bidi-lstm.py new file mode 100644 index 0000000000000..3be3d37342271 --- /dev/null +++ b/demo/quick_start/trainer_config.bidi-lstm.py @@ -0,0 +1,62 @@ +# edit-mode: -*- python -*- + +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +dict_file = "./data/dict.txt" +word_dict = dict() +with open(dict_file, 'r') as f: + for i, line in enumerate(f): + w = line.strip().split()[0] + word_dict[w] = i + +is_predict = get_config_arg('is_predict', bool, False) +trn = 'data/train.list' if not is_predict else None +tst = 'data/test.list' if not is_predict else 'data/pred.list' +process = 'process' if not is_predict else 'process_predict' +define_py_data_sources2(train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) + +batch_size = 128 if not is_predict else 1 +settings( + batch_size=batch_size, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25 +) + +bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) 
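+# This config can be selected in demo/quick_start/train.sh by setting
+# cfg=trainer_config.bidi-lstm.py. The network below embeds each word id
+# into a 128-d vector, encodes the sequence with a bidirectional LSTM,
+# applies dropout of 0.5, and classifies with a 2-way softmax fc_layer.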
+data = data_layer(name="word", size=len(word_dict)) +emb = embedding_layer(input=data, size=128) + +bi_lstm = bidirectional_lstm(input=emb, size=128) +dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5) + +output = fc_layer(input=dropout, size=2, + bias_attr=bias_attr, + act=SoftmaxActivation()) + +if is_predict: + maxid = maxid_layer(output) + outputs([maxid, output]) +else: + label = data_layer(name="label", size=2) + cls = classification_cost(input=output, label=label) + outputs(cls) diff --git a/demo/quick_start/trainer_config.db-lstm.py b/demo/quick_start/trainer_config.db-lstm.py new file mode 100644 index 0000000000000..b35bdf5a61b47 --- /dev/null +++ b/demo/quick_start/trainer_config.db-lstm.py @@ -0,0 +1,73 @@ +# edit-mode: -*- python -*- + +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +dict_file = "./data/dict.txt" +word_dict = dict() +with open(dict_file, 'r') as f: + for i, line in enumerate(f): + w = line.strip().split()[0] + word_dict[w] = i + +is_predict = get_config_arg('is_predict', bool, False) +trn = 'data/train.list' if not is_predict else None +tst = 'data/test.list' if not is_predict else 'data/pred.list' +process = 'process' if not is_predict else 'process_predict' +define_py_data_sources2(train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) + +batch_size = 128 if not is_predict else 1 +settings( + batch_size=batch_size, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25 +) + +bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) 
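+# This config can be selected in demo/quick_start/train.sh by setting
+# cfg=trainer_config.db-lstm.py. The network below stacks eight LSTM layers:
+# an initial mixed_layer + lstmemory pair, then seven fc_layer + lstmemory
+# blocks whose direction alternates via reverse=(i % 2) == 1; max pooling
+# over the last LSTM output feeds a 2-way softmax classifier.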
+ +data = data_layer(name="word", size=len(word_dict)) +emb = embedding_layer(input=data, size=128) + +hidden_0 = mixed_layer(size=128, input=[full_matrix_projection(input=emb)]) +lstm_0 = lstmemory(input=hidden_0, layer_attr=ExtraAttr(drop_rate=0.1)) + +input_layers = [hidden_0, lstm_0] + +for i in range(1,8): + fc = fc_layer(input=input_layers, size=128) + lstm = lstmemory(input=fc, layer_attr=ExtraAttr(drop_rate=0.1), + reverse=(i % 2) == 1,) + input_layers = [fc, lstm] + +lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) + +output = fc_layer(input=lstm_last, size=2, + bias_attr=bias_attr, + act=SoftmaxActivation()) + +if is_predict: + maxid = maxid_layer(output) + outputs([maxid, output]) +else: + label = data_layer(name="label", size=2) + cls = classification_cost(input=output, label=label) + outputs(cls) From ac383dd021b2c2479dd9e810892f47b12136b8b6 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Mon, 24 Oct 2016 10:00:18 +0800 Subject: [PATCH 038/180] add missing layer_attr (#234) --- .../paddle/trainer_config_helpers/layers.py | 106 +++++++++++++++--- 1 file changed, 88 insertions(+), 18 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 686704cb7c9b0..d45a9b53dcc94 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -2799,7 +2799,9 @@ def __cost_input__(input, label, weight=None): @wrap_name_default() -def regression_cost(input, label, weight=None, name=None): +@layer_support() +def regression_cost(input, label, weight=None, name=None, + layer_attr=None): """ Regression Layer. @@ -2814,12 +2816,15 @@ def regression_cost(input, label, weight=None, name=None): :param weight: The weight affects the cost, namely the scale of cost. It is an optional argument. :type weight: LayerOutput + :param layer_attr: layer's extra attribute. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput """ ipts, parents = __cost_input__(input, label, weight) - Layer(inputs=ipts, type="square_error", name=name) + Layer(inputs=ipts, type="square_error", name=name, + **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.COST, parents=parents) @@ -2948,7 +2953,8 @@ def conv_operator(img, filter, filter_size, num_filters, @wrap_name_default() -def conv_shift_layer(a, b, name=None): +@layer_support() +def conv_shift_layer(a, b, name=None, layer_attr=None): """ This layer performs cyclic convolution for two input. For example: - a[in]: contains M elements. @@ -2977,6 +2983,8 @@ def conv_shift_layer(a, b, name=None): :type a: LayerOutput :param b: input layer b :type b: LayerOutput + :param layer_attr: layer's extra attribute. + :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. 
:rtype: LayerOutput """ @@ -2986,6 +2994,7 @@ def conv_shift_layer(a, b, name=None): name=name, type=LayerType.CONV_SHIFT_LAYER, inputs=[a.name, b.name], + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.CONV_SHIFT_LAYER, parents=[a, b], @@ -3059,6 +3068,7 @@ def tensor_layer(a, b, size, act=None, name=None, @wrap_param_attr_default() @wrap_bias_attr_default() @wrap_act_default() +@layer_support() def selective_fc_layer(input, select, size, act=None, name=None, pass_generation=False, has_selected_colums=True, @@ -3131,7 +3141,8 @@ def selective_fc_layer(input, select, size, act=None, name=None, @wrap_name_default() -def sampling_id_layer(input, name=None): +@layer_support() +def sampling_id_layer(input, name=None, layer_attr=None): """ A layer for sampling id from multinomial distribution from the input layer. Sampling one id for one sample. @@ -3146,6 +3157,8 @@ def sampling_id_layer(input, name=None): :type input: LayerOutput :param name: The Layer Name. :type name: basestring + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ @@ -3153,12 +3166,15 @@ def sampling_id_layer(input, name=None): name=name, type=LayerType.SAMPLING_ID_LAYER, inputs=[Input(input.name)], + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.SAMPLING_ID_LAYER, input) @wrap_name_default() -def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0): +@layer_support() +def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0, + layer_attr=None): """ This layer for applying a slope and an intercept to the input element-wise. There is no activation and weight. @@ -3180,6 +3196,8 @@ def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0): :type slope: float. :param intercept: the offset. :type intercept: float. + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ @@ -3189,12 +3207,15 @@ def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0): slope=slope, intercept=intercept, inputs=[Input(input.name)], + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.SLOPE_INTERCEPT_LAYER, input) @wrap_name_default() -def linear_comb_layer(weights, vectors, size=None, name=None): +@layer_support() +def linear_comb_layer(weights, vectors, size=None, name=None, + layer_attr=None): """ A layer for weighted sum of vectors takes two inputs. - Input: size of weights is M @@ -3235,6 +3256,8 @@ def linear_comb_layer(weights, vectors, size=None, name=None): :type size: int :param name: The Layer Name. :type name: basestring + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. 
:rtype: LayerOutput """ @@ -3250,6 +3273,7 @@ def linear_comb_layer(weights, vectors, size=None, name=None): type=LayerType.LINEAR_COMBINATION_LAYER, size=size, inputs=[Input(weights.name), Input(vectors.name)], + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.LINEAR_COMBINATION_LAYER, [weights, vectors], size=size) @@ -3259,6 +3283,7 @@ def linear_comb_layer(weights, vectors, size=None, name=None): @wrap_name_default() +@layer_support() def block_expand_layer(input, channel=0, block_x=0, @@ -3267,7 +3292,8 @@ def block_expand_layer(input, stride_y=0, padding_x=0, padding_y=0, - name=None): + name=None, + layer_attr=None): """ Expand feature map to minibatch matrix. - matrix width is: block_y * block_x * channel @@ -3314,6 +3340,8 @@ def block_expand_layer(input, :type padding_y: int :param name: The name of this layer, which can not specify. :type name: None|basestring. + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ @@ -3328,13 +3356,16 @@ def block_expand_layer(input, padding_y=padding_y) ), type=LayerType.BLOCK_EXPAND, + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.BLOCK_EXPAND, parents=[input]) @wrap_name_default() -def ctc_layer(input, label, size=None, name=None, norm_by_times=False): +@layer_support() +def ctc_layer(input, label, size=None, name=None, norm_by_times=False, + layer_attr=None): """ Connectionist Temporal Classification (CTC) is designed for temporal classication task. That is, for sequence labeling problems where the @@ -3371,6 +3402,8 @@ def ctc_layer(input, label, size=None, name=None, norm_by_times=False): :type name: basestring|None :param norm_by_times: Whether to normalization by times. False by default. :type norm_by_times: bool + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. :rtype: LayerOutput """ @@ -3386,14 +3419,17 @@ def ctc_layer(input, label, size=None, name=None, norm_by_times=False): type=LayerType.CTC_LAYER, size=size, norm_by_times=norm_by_times, - inputs=[input.name, label.name] + inputs=[input.name, label.name], + **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.CTC_LAYER, [input, label], size=size) @wrap_name_default() @wrap_param_attr_default() -def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None): +@layer_support() +def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, + layer_attr=None): """ A layer for calculating the cost of sequential conditional random field model. @@ -3419,6 +3455,8 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None): :type param_attr: ParameterAttribute :param name: The name of this layers. It is not necessary. :type name: None|basestring + :param layer_attr: Extra Layer config. + :type layer_attr: ExtraLayerAttribute|None :return: LayerOutput object. 
    :rtype: LayerOutput
    """
@@ -3442,6 +3480,7 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None):
         type=LayerType.CRF_LAYER,
         size=size,
         inputs=ipts,
+        **ExtraLayerAttribute.to_kwargs(layer_attr)
     )
     parents = [input, label]
     if weight is not None:
@@ -3451,7 +3490,9 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None):
 @wrap_name_default()
 @wrap_param_attr_default()
-def crf_decoding_layer(input, size, label=None, param_attr=None, name=None):
+@layer_support()
+def crf_decoding_layer(input, size, label=None, param_attr=None, name=None,
+                       layer_attr=None):
     """
     A layer for calculating the decoding sequence of sequential conditional
     random field model. The decoding sequence is stored in output.ids.
@@ -3469,6 +3510,8 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None):
     :type param_attr: ParameterAttribute
     :param name: The name of this layers. It is not necessary.
     :type name: None|basestring
+    :param layer_attr: Extra Layer config.
+    :type layer_attr: ExtraLayerAttribute|None
     :return: LayerOutput object.
     :rtype: LayerOutput
     """
@@ -3485,6 +3528,7 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None):
         type=LayerType.CRF_DECODING_LAYER,
         size=size,
         inputs=ipts,
+        **ExtraLayerAttribute.to_kwargs(layer_attr)
     )
     parents = [input]
     if label is not None:
@@ -3575,7 +3619,8 @@ def nce_layer(input, label, num_classes, weight=None,
 @wrap_name_default()
-def rank_cost(left, right, label, weight=None, name=None, coeff=1.0):
+@layer_support()
+def rank_cost(left, right, label, weight=None, name=None, coeff=1.0, layer_attr=None):
     """
     A cost Layer for learning to rank using gradient descent. Details can refer to `papers
Date: Mon, 24 Oct 2016 14:10:29 +0800
Subject: [PATCH 039/180] fix build bug in gcc46 (#236)

---
 paddle/trainer/ThreadParameterUpdater.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/trainer/ThreadParameterUpdater.cpp
index a26e9239f987f..d0fda1b6253e3 100644
--- a/paddle/trainer/ThreadParameterUpdater.cpp
+++ b/paddle/trainer/ThreadParameterUpdater.cpp
@@ -258,7 +258,7 @@ void SgdThreadUpdater::threadUpdateSparse(
     }
     // For numThreads > 1, MultiGradientMachine is used, which goes
     // to the above branch.
-    CHECK_EQ(numThreads, 1);
+    CHECK_EQ(numThreads, 1UL);
     mainMat->clearIndices();
   } else {
     auto & m = *para->getMat(PARAMETER_GRADIENT).get();

From e83950b0d2809af70806e3397964343ade296242 Mon Sep 17 00:00:00 2001
From: wenboyang
Date: Mon, 24 Oct 2016 14:27:32 +0800
Subject: [PATCH 040/180] error in doc of quick_start (#228)

* fix error in doc of quick_start

* There are some warning when execute preprocess.sh

---
 demo/quick_start/preprocess.sh   | 2 ++
 doc/demo/quick_start/index_en.md | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/demo/quick_start/preprocess.sh b/demo/quick_start/preprocess.sh
index fb2bee98beb26..fe2acbbd74898 100755
--- a/demo/quick_start/preprocess.sh
+++ b/demo/quick_start/preprocess.sh
@@ -20,6 +20,8 @@

 set -e

+export LC_ALL=C
+
 mkdir -p data/tmp
 python preprocess.py -i data/reviews_Electronics_5.json.gz
 # uniq and shuffle
diff --git a/doc/demo/quick_start/index_en.md b/doc/demo/quick_start/index_en.md
index ee3fa2a2166f4..e7d74512292c8 100644
--- a/doc/demo/quick_start/index_en.md
+++ b/doc/demo/quick_start/index_en.md
@@ -134,7 +134,7 @@ def process(settings, file_name):
 You need to add a data provider definition `define_py_data_sources2` in our network configuration.
 This definition specifies:
 - The path of the training and testing data (`data/train.list`, `data/test.list`).
-- The location of the data provider file (`dataprovider_pow`).
+- The location of the data provider file (`dataprovider_bow`).
 - The function to call to get data. (`process`).
 - Additional arguments or data. Here it passes the path of word dictionary.

From 3dd8c9bea4761652a8e4a4730b52563954ebc42b Mon Sep 17 00:00:00 2001
From: luotao1
Date: Mon, 24 Oct 2016 21:00:12 +0800
Subject: [PATCH 041/180] add maxout layer, including interface and unittest (#229)

* add maxout layer, including interface and unittest

* follow maxout comments

* auto setting channels

* fix unittest bug in test_RecurrentGradientMachine

---
 doc/ui/api/trainer_config_helpers/layers.rst  |   6 +
 paddle/cuda/include/hl_cnn.h                  |  32 ++++-
 paddle/cuda/include/stub/hl_cnn_stub.h        |   8 ++
 paddle/cuda/src/hl_cuda_cnn.cu                |  59 +++++++++
 paddle/gserver/layers/MaxOutLayer.cpp         |  87 ++++++++++++
 paddle/gserver/layers/MaxOutLayer.h           |  54 ++++++++
 paddle/gserver/tests/rnn_data_provider.py     |  29 +++-
 .../tests/sequence_nest_rnn_multi_input.conf  |   2 +-
 .../tests/sequence_rnn_multi_input.conf       |   2 +-
 paddle/gserver/tests/test_LayerGrad.cpp       |  18 +++
 paddle/math/Matrix.cpp                        | 125 ++++++++++++++++++
 paddle/math/Matrix.h                          |  34 ++++-
 paddle/math/tests/test_matrixCompare.cpp      |  72 ++++++++++
 proto/ModelConfig.proto.m4                    |  10 ++
 python/paddle/trainer/config_parser.py        |  32 +++++
 .../paddle/trainer_config_helpers/layers.py   |  70 +++++++++-
 .../tests/configs/check.md5                   |   1 +
 .../tests/configs/generate_protostr.sh        |   3 +-
 .../tests/configs/test_maxout.py              |  30 +++++
 19 files changed, 665 insertions(+), 9 deletions(-)
 create mode 100644 paddle/gserver/layers/MaxOutLayer.cpp
 create mode 100644 paddle/gserver/layers/MaxOutLayer.h
 create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_maxout.py

diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst
index 55f5623b0faef..5bb88b0615c12 100644
--- a/doc/ui/api/trainer_config_helpers/layers.rst
+++ b/doc/ui/api/trainer_config_helpers/layers.rst
@@ -73,6 +73,12 @@ img_pool_layer
    :members: img_pool_layer
    :noindex:

+maxout_layer
+------------
+..  automodule:: paddle.trainer_config_helpers.layers
+    :members: maxout_layer
+    :noindex:
+
 Norm Layer
 ==========

diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h
index 5d750333e1e35..d19f4a4bb310a 100644
--- a/paddle/cuda/include/hl_cnn.h
+++ b/paddle/cuda/include/hl_cnn.h
@@ -169,7 +169,7 @@ extern void hl_avgpool_forward(
 * @brief Maximum pool backward.
 *
 * @param[in] frameCnt batch size of input image.
- * @param[in] outGrad input data.
+ * @param[in] outGrad output grad data.
 * @param[in] channels number of channel.
 * @param[in] height image height.
 * @param[in] width image width.
@@ -240,4 +240,34 @@ extern void hl_CMRNorm_backward(
 size_t channels, size_t height, size_t width, size_t sizeX,
 real alpha, real beta);

+/**
+ * @brief MaxOut forward.
+ *
+ * @param[in] inData input data.
+ * @param[out] outData output data.
+ * @param[out] idData output maxId.
+ * @param[in] batchSize batchSize.
+ * @param[in] size number of channels * image height * image width.
+ * @param[in] featLen feature length = image height * image width.
+ * @param[in] groups number of groups.
+ */
+extern void hl_maxout_forward(
+ const real* inData, real* outData, int* idData,
+ size_t batchSize, size_t size, size_t featLen, size_t groups);
+
+/**
+ * @brief MaxOut backward.
+ * + * @param[out] inGrad input grad data. + * @param[in] outGrad output grad data. + * @param[in] idData output maxId. + * @param[in] batchSize batchSize. + * @param[in] size number of channels * image height * image width. + * @param[in] featLen feature length = image height * image width. + * @param[in] groups number of groups. + */ +extern void hl_maxout_backward( + real* inGrad, const real* outGrad, const int* idData, + size_t batchSize, size_t size, size_t featLen, size_t groups); + #endif /* HL_CNN_H_ */ diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index 38e359c3eb2f3..5f696986e3c8f 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -89,4 +89,12 @@ inline void hl_CMRNorm_backward( size_t channels, size_t height, size_t width, size_t sizeX, real alpha, real beta) {} +inline void hl_maxout_forward( + const real* inData, real* outData, int* idData, + size_t batchSize, size_t size, size_t featLen, size_t group) {} + +inline void hl_maxout_backward( + real* inGrad, const real* outGrad, const int* idData, + size_t batchSize, size_t size, size_t featLen, size_t group) {} + #endif // HL_CNN_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index abac83a3e0447..baa2fb0d27d74 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -531,3 +531,62 @@ void hl_CMRNorm_backward(size_t frameCnt, const real* inV, height, width, sizeX, alpha, beta, inDiff); CHECK_SYNC("hl_CMRNorm_backward"); } + +__global__ void maxoutFpCompute(size_t nthreads, const real * inData, + real * outData, int* idData, + size_t size, size_t featLen, size_t groups) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if(index < nthreads) { + size_t batch_idx = index / size; + size_t i = index % size; + size_t channel_idx = i / featLen; + size_t feat_idx = i % featLen; + size_t data_idx = (batch_idx * size + channel_idx * featLen) * groups + feat_idx; + real max = inData[data_idx]; + int maxId = 0; + for (size_t g = 1; g < groups; ++g) { + real tmp = inData[data_idx + g * featLen]; + if (tmp > max) { + max = tmp; + maxId = g; + } + } + outData[index] = max; + idData[index] = maxId; + } +} + +void hl_maxout_forward(const real* inData, real* outData, + int* idData, size_t batchSize, size_t size, + size_t featLen, size_t groups) { + int num_kernels = size * batchSize; + int blocks = (num_kernels + 1024 - 1) / 1024; + maxoutFpCompute<<< blocks, 1024, 0, STREAM_DEFAULT>>>( + num_kernels, inData, outData, idData, size, featLen, groups); + CHECK_SYNC("hl_maxout_forward failed"); +} + +__global__ void maxoutBpCompute(size_t nthreads, real* inGrad, + const real* outGrad, const int* idData, + size_t size, size_t featLen, size_t groups) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if(index < nthreads) { + size_t batch_idx = index / size; + size_t i = index % size; + size_t channel_idx = i / featLen; + size_t feat_idx = i % featLen; + size_t newIndex = batch_idx * size; + size_t gradIdx = (channel_idx * groups + (idData + newIndex)[i]) * featLen + feat_idx; + (inGrad + newIndex * groups)[gradIdx] += (outGrad + newIndex)[i]; + } +} + +void hl_maxout_backward(real* inGrad, const real* outGrad, + const int* idData, size_t batchSize, size_t size, + size_t featLen, size_t groups) { + int num_kernels = size * batchSize; + int blocks = (num_kernels + 1024 - 1) / 1024; + maxoutBpCompute<<< blocks, 1024, 0, STREAM_DEFAULT >>>( + num_kernels, inGrad, outGrad, idData, size, 
featLen, groups); + CHECK_SYNC("hl_maxout_backward failed"); +} diff --git a/paddle/gserver/layers/MaxOutLayer.cpp b/paddle/gserver/layers/MaxOutLayer.cpp new file mode 100644 index 0000000000000..106ab26ba1aae --- /dev/null +++ b/paddle/gserver/layers/MaxOutLayer.cpp @@ -0,0 +1,87 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "MaxOutLayer.h" +#include "hl_gpu.h" +#include "hl_cnn.h" + +namespace paddle { + +REGISTER_LAYER(maxout, MaxOutLayer); + +size_t MaxOutLayer::getSize() { + const MaxOutConfig& maxoutConf = config_.inputs(0).maxout_conf(); + imgSizeH_ = inputLayers_[0]->getOutput().getFrameHeight(); + imgSizeW_ = inputLayers_[0]->getOutput().getFrameWidth(); + if (imgSizeH_ == 0) { + imgSizeH_ = maxoutConf.img_size_y(); + } + if (imgSizeW_ == 0) { + imgSizeW_ = maxoutConf.img_size_x(); + } + + featLen_ = imgSizeH_ * imgSizeW_; + size_t layerSize = featLen_ * outputChannels_; + + getOutput().setFrameHeight(imgSizeH_); + getOutput().setFrameWidth(imgSizeW_); + + return layerSize; +} + +bool MaxOutLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + /* the size of inputs for maxout-layer is 1 */ + CHECK_EQ(config_.inputs_size(), 1UL); + + const MaxOutConfig& conf = config_.inputs(0).maxout_conf(); + groups_ = conf.groups(); + channels_ = conf.channels(); + CHECK_EQ(channels_ % groups_, 0UL); + outputChannels_ = channels_ / groups_; + + return true; +} + +void MaxOutLayer::forward(PassType passType) { + Layer::forward(passType); + + /* malloc memory for the output_ if necessary */ + /* note: one sample correspond to one column */ + size_t batchSize = getInput(0).getBatchSize(); + size_t size = getSize(); + resetOutput(batchSize, size); + MatrixPtr inputV = getInputValue(0); + MatrixPtr outV = getOutputValue(); + + IVector::resizeOrCreate(maxoutId_, size * batchSize, useGpu_); + outV->maxoutForward(*inputV, *maxoutId_, outputChannels_, groups_); +} + +void MaxOutLayer::backward(const UpdateCallback& callback) { + (void)callback; + + /* Do derivation */ + MatrixPtr inputG = getInputGrad(0); + MatrixPtr outG = getOutputGrad(); + + if (inputG) { + inputG->maxoutBackward(*outG, *maxoutId_, outputChannels_, groups_); + } +} + +} // namespace paddle diff --git a/paddle/gserver/layers/MaxOutLayer.h b/paddle/gserver/layers/MaxOutLayer.h new file mode 100644 index 0000000000000..9011a5c332b17 --- /dev/null +++ b/paddle/gserver/layers/MaxOutLayer.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include "Layer.h"
+#include "paddle/math/Matrix.h"
+
+namespace paddle {
+
+/**
+ * A layer to do max out on conv layer output.
+ *   Input: output of a conv layer.
+ *   Output: feature map size same as input. Channel is (input channel) / groups.
+ * So the number of channels should be divisible by groups.
+ *
+ * The config file api is maxout_layer.
+ */
+
+class MaxOutLayer : public Layer {
+protected:
+  size_t groups_;
+  size_t imgSizeH_, imgSizeW_;
+  /// outputChannels_ = channels_ / groups_
+  size_t channels_, outputChannels_;
+  /// feature length = imgSizeH_ * imgSizeW_
+  size_t featLen_;
+  IVectorPtr maxoutId_;
+
+public:
+  /// return imgSizeH_ * imgSizeW_ * outputChannels_;
+  size_t getSize();
+
+  explicit MaxOutLayer(const LayerConfig& config) : Layer(config) {}
+  virtual ~MaxOutLayer() {}
+
+  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
+
+  void forward(PassType passType);
+  void backward(const UpdateCallback& callback = nullptr);
+};
+
+} // namespace paddle
diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/gserver/tests/rnn_data_provider.py
index 5c3b062309c51..321c78cb1741b 100644
--- a/paddle/gserver/tests/rnn_data_provider.py
+++ b/paddle/gserver/tests/rnn_data_provider.py
@@ -14,12 +14,15 @@
 from paddle.trainer.PyDataProvider2 import *

+# Note that each config should have an independent provider
+# in current design of PyDataProvider2.
+####################################################### data = [ [[[1, 3, 2], [4, 5, 2]], 0], [[[0, 2], [2, 5], [0, 1, 2]], 1], ] - +# Used for sequence_nest_rnn.conf @provider(input_types=[integer_value_sub_sequence(10), integer_value(3)], should_shuffle=False) @@ -27,7 +30,7 @@ def process_subseq(settings, file_name): for d in data: yield d - +# Used for sequence_rnn.conf @provider(input_types=[integer_value_sequence(10), integer_value(3)], should_shuffle=False) @@ -38,11 +41,32 @@ def process_seq(settings, file_name): seq += subseq yield seq, d[1] +# Used for sequence_nest_rnn_multi_input.conf +@provider(input_types=[integer_value_sub_sequence(10), + integer_value(3)], + should_shuffle=False) +def process_subseq2(settings, file_name): + for d in data: + yield d + +# Used for sequence_rnn_multi_input.conf +@provider(input_types=[integer_value_sequence(10), + integer_value(3)], + should_shuffle=False) +def process_seq2(settings, file_name): + for d in data: + seq = [] + for subseq in d[0]: + seq += subseq + yield seq, d[1] + +########################################################### data2 = [ [[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]] ,0], [[[0, 2], [2, 5], [0, 1, 2]],[[1, 5], [4], [2, 3, 6, 1]], 1], ] +# Used for sequence_nest_rnn_multi_unequalength_inputs.conf @provider(input_types=[integer_value_sub_sequence(10), integer_value_sub_sequence(10), integer_value(2)], @@ -52,6 +76,7 @@ def process_unequalength_subseq(settings, file_name): yield d +# Used for sequence_rnn_multi_unequalength_inputs.conf @provider(input_types=[integer_value_sequence(10), integer_value_sequence(10), integer_value(2)], diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf b/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf index e8222cef525a8..0614958b4719d 100644 --- a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf +++ b/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf @@ -19,7 +19,7 @@ from paddle.trainer_config_helpers import * define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', test_list=None, module='rnn_data_provider', - obj='process_subseq') + obj='process_subseq2') settings(batch_size=2, learning_rate=0.01) diff --git a/paddle/gserver/tests/sequence_rnn_multi_input.conf b/paddle/gserver/tests/sequence_rnn_multi_input.conf index 968621cab59be..51881e21d971b 100644 --- a/paddle/gserver/tests/sequence_rnn_multi_input.conf +++ b/paddle/gserver/tests/sequence_rnn_multi_input.conf @@ -19,7 +19,7 @@ from paddle.trainer_config_helpers import * define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', test_list=None, module='rnn_data_provider', - obj='process_seq') + obj='process_seq2') settings(batch_size=2, learning_rate=0.01) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index c5723f8574ab3..eab9bf84141a2 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -307,6 +307,24 @@ TEST(Layer, blockExpandLayer) { } } +TEST(Layer, maxoutLayer) { + TestConfig config; + config.biasSize = 0; + config.layerConfig.set_type("maxout"); + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0}); + LayerInputConfig* input = config.layerConfig.add_inputs(); + MaxOutConfig* maxout = input->mutable_maxout_conf(); + + maxout->set_img_size_x(32); + maxout->set_img_size_y(32); + maxout->set_channels(4); + maxout->set_groups(2); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "maxout", 10, false, useGpu); + } +} void testFcLayer(string format, 
size_t nnz) { TestConfig config; config.biasSize = 4096; diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 78519ce7aa874..843eabc97d642 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -583,6 +583,42 @@ void GpuMatrix::colMax(Matrix& max) { max.maxCols(*this); } +void GpuMatrix::colMax(IVector& maxIds, Matrix& maxVal) { + LOG(FATAL) << "Is not supported"; +} + +void GpuMatrix::maxoutForward(Matrix& a, IVector& id, size_t channels, + size_t groups) { + CHECK(dynamic_cast(&a)); + CHECK(dynamic_cast(&id)); + CHECK_EQ(a.getHeight(), getHeight()); + + size_t size = getWidth(); + size_t batchSize = getHeight(); + const real* input = a.getData(); + real* output = getData(); + int* idForGpu = id.getData(); + + hl_maxout_forward(input, output, idForGpu, batchSize, size, + size / channels, groups); +} + +void GpuMatrix::maxoutBackward(Matrix& a, IVector& id, size_t channels, + size_t groups) { + CHECK(dynamic_cast(&a)); + CHECK(dynamic_cast(&id)); + CHECK_EQ(a.getHeight(), getHeight()); + + size_t size = a.getWidth(); + size_t batchSize = getHeight(); + real* input = getData(); + const real* output = a.getData(); + const int* idForGpu = id.getData(); + + hl_maxout_backward(input, output, idForGpu, batchSize, size, + size / channels, groups); +} + /*calulate the error of classification */ void GpuMatrix::classificationError(MatrixPtr output, IVectorPtr label) { GpuMatrixPtr output_ptr = std::dynamic_pointer_cast(output); @@ -2748,6 +2784,95 @@ void CpuMatrix::colMax(Matrix& max) { max.maxCols(*this); } +void CpuMatrix::colMax(IVector& maxIds, Matrix& maxVal) { + CHECK(isContiguous()); + CHECK(!maxIds.useGpu() && !maxVal.useGpu()) << "Matrix type are not equal"; + size_t numSamples = getWidth(); + size_t beam = maxVal.getHeight(); + CHECK_EQ(maxIds.getSize(), numSamples * beam); + CHECK_EQ(maxVal.getWidth(), numSamples); + + real* a = getData(); + int* s = maxIds.getData(); + real* t = maxVal.getData(); + size_t dim = getHeight(); + for (size_t i = 0; i < numSamples; i++) { + std::vector> vec; + for (size_t j = 0; j < dim; j++) { + vec.push_back(std::pair(a[i + j * numSamples], j)); + } + + std::partial_sort( + vec.begin(), vec.begin() + beam, vec.end(), + [](const std::pair& l, const std::pair& r) { + return l.first > r.first; + }); + for (size_t j = 0; j < beam; j++) { + t[i + j * numSamples] = vec[j].first; + s[i + j * numSamples] = vec[j].second; + } + } +} + +void CpuMatrix::maxoutForward(Matrix& a, IVector& id, size_t channels, + size_t groups) { + CHECK(dynamic_cast(&a)); + CHECK(dynamic_cast(&id)); + CHECK_EQ(a.getHeight(), getHeight()); + + size_t size = getWidth(); + size_t batchSize = getHeight(); + size_t featLen = size / channels; + const real* input = a.getData(); + int* idForCpu = id.getData(); + + MatrixPtr maxInMat, maxOutMat; + Matrix::resizeOrCreate(maxInMat, groups, size, false, false); + Matrix::resizeOrCreate(maxOutMat, 1, size, false, false); + + for (size_t batch_idx = 0; batch_idx < batchSize; ++batch_idx) { + size_t newIndex = batch_idx * size; + IVectorPtr tmpId = IVector::create(idForCpu + newIndex, size, false); + + for (size_t i = 0; i < channels; ++i) { + size_t newFeatLen = i * featLen; + for (size_t j = 0; j < groups; ++j) { + maxInMat->subMatrix(j, j + 1, newFeatLen, newFeatLen + featLen) + ->copyFrom(input + (newIndex + newFeatLen) * groups + j * featLen, + featLen); + } + } + maxInMat->colMax(*tmpId, *maxOutMat); + this->subRowMatrix(batch_idx, batch_idx + 1)->copyFrom(*maxOutMat); + } +} + +void CpuMatrix::maxoutBackward(Matrix& 
a, IVector& id, size_t channels, + size_t groups) { + CHECK(dynamic_cast(&a)); + CHECK(dynamic_cast(&id)); + CHECK_EQ(a.getHeight(), getHeight()); + + size_t size = a.getWidth(); + size_t batchSize = getHeight(); + size_t featLen = size / channels; + size_t newFeatLen = groups * featLen; + real* inputG = getData(); + const real* outG = a.getData(); + int* idForCpu = id.getData(); + + for (size_t batch_idx = 0; batch_idx < batchSize; ++batch_idx) { + size_t newIndex = batch_idx * size; + int* idData = idForCpu + newIndex; + + for (size_t i = 0; i < size; ++i) { + int gradIdx = + idData[i] * featLen + (i / featLen) * newFeatLen + i % featLen; + (inputG + newIndex * groups)[gradIdx] += (outG + newIndex)[i]; + } + } +} + void CpuMatrix::rowNormalizeL1(Matrix& out) { CHECK(!out.useGpu()); diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 25104fe1c6d70..047c76a8604cc 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -493,16 +493,40 @@ class Matrix : public BaseMatrix { LOG(FATAL) << "Not implemeted"; } + /** + * set the max of each column of this to mat + */ virtual void colMax(Matrix& max) { LOG(FATAL) << "not implemented"; } + /** + * @brief Get the top k elements of each column of this matrix. + * + * The row ids and values of these elements are stored in + * maxIds and max respectively. where k is the size of maxIds. + * And note that the top k elements are not sorted. + */ + virtual void colMax(IVector& maxIds, Matrix& maxVal) { + LOG(FATAL) << "not implemented"; + } + + virtual void maxoutForward(Matrix& a, IVector& id, size_t channels, + size_t groups) { + LOG(FATAL) << "not implemented"; + } + + virtual void maxoutBackward(Matrix& a, IVector& id, size_t channels, + size_t groups) { + LOG(FATAL) << "not implemented"; + } + virtual void rowMaxId(IVector& maxIds) { LOG(FATAL) << "Not implemented"; } /** * @brief Get the top k elements of each row of this matrix. * * The column ids and values of these elements are stored in - * maxIds and max respectively. Note that the top k - * elements are not sorted. + * maxIds and max respectively. where k is the size of maxIds. + * And note that the top k elements are not sorted. 
*/ virtual void rowMax(IVector& maxIds, Matrix& max) { LOG(FATAL) << "Not implemented"; @@ -1085,6 +1109,9 @@ class GpuMatrix : public Matrix { void rowMax(Matrix& max); void rowMax(IVector& maxIds, Matrix& max); void colMax(Matrix& max); + void colMax(IVector& maxIds, Matrix& max); + void maxoutForward(Matrix& a, IVector& id, size_t channels, size_t groups); + void maxoutBackward(Matrix& a, IVector& id, size_t channels, size_t groups); void oneHotCrossEntropy(Matrix& output, IVector& label); void oneHotCrossEntropyBp(Matrix& outputV, IVector& label); @@ -1395,6 +1422,9 @@ class CpuMatrix : public Matrix { void rowMax(Matrix& max); void rowMax(IVector& maxIds, Matrix& maxVal); void colMax(Matrix& max); + void colMax(IVector& maxIds, Matrix& maxVal); + void maxoutForward(Matrix& a, IVector& id, size_t channels, size_t groups); + void maxoutBackward(Matrix& a, IVector& id, size_t channels, size_t groups); void rowNormalizeL1(Matrix& out); void oneHotCrossEntropy(Matrix& output, IVector& label); diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index e1bda79a8acb1..ac160479a9dfc 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -1999,6 +1999,78 @@ TEST(Matrix, PoolFwdBwd) { } } +void testMaxOutFwdBwd(int numSamples, int imgSizeH, int imgSizeW, + int channels, int groups) { + int inWidth = imgSizeH * imgSizeW * channels; + int outChannels = channels / groups; + int outWidth = imgSizeH * imgSizeW * outChannels; + + // forward + MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true); + + MatrixPtr target = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpu = GpuMatrix::create(numSamples, outWidth, false, true); + MatrixPtr targetCheck = CpuMatrix::create(numSamples, outWidth, false, false); + + IVectorPtr id = CpuIVector::create(numSamples * outWidth, false); + IVectorPtr idGpu = GpuIVector::create(numSamples * outWidth, true); + IVectorPtr idCheck = CpuIVector::create(numSamples * outWidth, false); + + input->randomizeUniform(); + inputGpu->copyFrom(*input); + + target->maxoutForward(*input, *id, outChannels, groups); + targetGpu->maxoutForward(*inputGpu, *idGpu, outChannels, groups); + + // check + targetCheck->copyFrom(*targetGpu); + MatrixCheckErr(*target, *targetCheck); + idCheck->copyFrom(*idGpu); + VectorCheckEqual(*id, *idCheck); + + // backward + MatrixPtr inputGrad = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true); + + MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, false, + true); + MatrixPtr targetCheckGrad = CpuMatrix::create(numSamples, inWidth, false, + false); + + inputGrad->randomizeUniform(); + targetGrad->randomizeUniform(); + inputGpuGrad->copyFrom(*inputGrad); + targetGpuGrad->copyFrom(*targetGrad); + + inputGrad->maxoutBackward(*targetGrad, *id, outChannels, groups); + inputGpuGrad->maxoutBackward(*targetGpuGrad, *idGpu, outChannels, groups); + + // check + targetCheckGrad->copyFrom(*inputGpuGrad); + MatrixCheckErr(*inputGrad, *targetCheckGrad); +} + +TEST(Matrix, MaxOutFwdBwd) { + for (auto numSamples : {5, 10}) { + for (auto channels : {8, 16}) { + for (auto imgSizeH : {14, 28}) { + for (auto imgSizeW : {16, 30}) { + for (auto groups : {2, 4}) { + VLOG(3) << " numSamples=" << 
numSamples + << " channels=" << channels + << " imgSizeH=" << imgSizeH + << " imgSizeW=" << imgSizeW + << " groups=" << groups; + testMaxOutFwdBwd(numSamples, imgSizeH, imgSizeW, channels, groups); + } + } + } + } + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 25e36f9c4c168..70c1f8d563238 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -170,6 +170,15 @@ message BlockExpandConfig { required uint32 img_size_y = 11; } +message MaxOutConfig { + required uint32 channels = 1; + required uint32 groups = 2; + + // The size of input feature map. + required uint32 img_size_x = 3; + required uint32 img_size_y = 4; +} + message ProjectionConfig { required string type = 1; required string name = 2; @@ -225,6 +234,7 @@ message LayerInputConfig { // If the input layer has multi-output. // Set the argument name. optional string input_layer_argument = 9; + optional MaxOutConfig maxout_conf = 10; } message LayerConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index c1e74c7a2d8f7..fe8a5e5d48767 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -469,6 +469,7 @@ def __init__( pool=None, image=None, block_expand=None, + maxout=None, format=None, nnz=None, is_static=None, @@ -785,6 +786,16 @@ def __init__( output_y = 0): self.add_keys(locals()) +@config_class +class MaxOut(Cfg): + def __init__( + self, + channels, + groups, + img_size_x = 0, + img_size_y = 0): + self.add_keys(locals()) + def DataBase(async_load_data=False, constant_slots=None, data_ratio=1, @@ -1082,6 +1093,12 @@ def parse_block_expand(block_expand, input_layer_name, block_expand_conf): int(math.ceil((2 * block_expand.padding_y + block_expand.img_size_y \ - block_expand.block_y) / float(block_expand.stride_y))) +def parse_maxout(maxout, input_layer_name, maxout_conf): + maxout_conf.channels = maxout.channels + maxout_conf.groups = maxout.groups + maxout_conf.img_size_x = maxout.img_size_x + maxout_conf.img_size_y = maxout.img_size_y + # Define an evaluator @config_func def Evaluator( @@ -1705,6 +1722,21 @@ def __init__( self.set_layer_size(block_expand_conf.block_x * block_expand_conf.block_y * block_expand_conf.channels) +@config_layer('maxout') +class MaxOutLayer(LayerBase): + def __init__( + self, + name, + inputs, + **xargs): + super(MaxOutLayer, self).__init__(name, 'maxout', 0, inputs=inputs, **xargs) + input_layer = self.get_input_layer(0) + parse_maxout(self.inputs[0].maxout, + input_layer.name, + self.config.inputs[0].maxout_conf) + maxout_conf = self.config.inputs[0].maxout_conf + self.set_layer_size(g_layer_map[input_layer.name].size / maxout_conf.groups) + # key: cost type # value: cost class g_cost_map = {} diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index d45a9b53dcc94..c4e8fe4abc026 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -55,7 +55,7 @@ 'multi_binary_label_cross_entropy', 'rank_cost', 'lambda_cost', 'huber_cost', # 'block_expand_layer', # TODO(yuyang18): this layer is not correct - 'out_prod_layer', 'print_layer' + 'maxout_layer', 'out_prod_layer', 'print_layer' ] @@ -110,6 +110,7 @@ class LayerType(object): SLOPE_INTERCEPT_LAYER = "slope_intercept" LINEAR_COMBINATION_LAYER = "convex_comb" BLOCK_EXPAND = "blockexpand" + MAXOUT = 
"maxout" PRINT_LAYER = "print" @@ -3362,6 +3363,73 @@ def block_expand_layer(input, return LayerOutput(name, LayerType.BLOCK_EXPAND, parents=[input]) +@wrap_name_default() +@layer_support() +def maxout_layer(input, + groups, + num_channels=None, + size_x=None, + size_y=None, + name=None, + layer_attr=None): + """ + A layer to do max out on conv layer output. + - Input: output of a conv layer. + - Output: feature map size same as input. Channel is (input channel) / groups. + + So groups should be larger than 1, and the num of channels should be able + to devided by groups. + + Please refer to Paper: + - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf + - Multi-digit Number Recognition from Street View \ + Imagery using Deep Convolutional Neural Networks: \ + https://arxiv.org/pdf/1312.6082v4.pdf + + The simple usage is: + + .. code-block:: python + + maxout = maxout_layer(input, + num_channels=128, + groups=4) + + :param input: The input layer. + :type input: LayerOutput + :param num_channels: The channel number of input layer. If None will be set + automatically from previous output. + :type num_channels: int|None + :param groups: The group number of input layer. + :type groups: int + :param size_x: conv output width. If None will be set + automatically from previous output. + :type size_x: int|None + :param size_y: conv output height. If None will be set + automatically from previous output. + :type size_y: int|None + :param name: The name of this layer, which can not specify. + :type name: None|basestring. + :param layer_attr: Extra Layer attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. + :rtype: LayerOutput + """ + assert input.layer_type == LayerType.CONV_LAYER + assert isinstance(input.activation, LinearActivation) + assert groups > 1 + if num_channels is None: + assert input.num_filters is not None + num_channels = input.num_filters + assert num_channels % groups == 0 + Layer(name=name, + inputs=Input(input.name, + maxout=MaxOut(channels=num_channels, + groups=groups)), + type=LayerType.MAXOUT, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput(name, LayerType.MAXOUT, parents=[input]) + + @wrap_name_default() @layer_support() def ctc_layer(input, label, size=None, name=None, norm_by_times=False, diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 96bf3fb2e19d6..88ce5c129e552 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -12,6 +12,7 @@ a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 8bb44e1e5072d0c261572307e7672bda test_grumemory_layer.protostr 1f3510672dce7a9ed25317fc58579ac7 test_hsigmoid.protostr d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr +6fa59551808ee7012bbd24f757e782d2 test_maxout.protostr 251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr 2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 7cdd682056fd4..4b1d2d3d41d52 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -8,7 +8,8 @@ configs=(test_fc layer_activations projections 
test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers -test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight) +test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight +test_maxout) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py new file mode 100644 index 0000000000000..079e2cf4c4320 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py @@ -0,0 +1,30 @@ +from paddle.trainer_config_helpers import * + +settings( + batch_size=1000, + learning_rate=1e-5 +) + +data = data_layer(name='data', size=2304) + +conv = img_conv_layer(input=data, + filter_size = 3, + num_channels=1, + num_filters=16, + padding=1, + act=LinearActivation(), + bias_attr=True) + +maxout = maxout_layer(input=conv, + num_channels=16, + groups=2) + +pool = img_pool_layer(input=maxout, + num_channels=8, + pool_size=2, + stride=2, + pool_type=MaxPooling()) + +fc = fc_layer(input=pool, size=384, bias_attr=False) + +outputs(fc) From 652b83478fd36ed17d0c911c3564c795a537f440 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Mon, 24 Oct 2016 21:06:42 +0800 Subject: [PATCH 042/180] remove deprecated start input in img_pool_layer (#237) --- python/paddle/trainer_config_helpers/layers.py | 6 ++---- python/paddle/trainer_config_helpers/networks.py | 12 ++++-------- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index c4e8fe4abc026..f8c32dc91f10b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1630,7 +1630,7 @@ def img_conv_layer(input, filter_size, num_filters, @layer_support() def img_pool_layer(input, pool_size, name=None, num_channels=None, pool_type=None, - stride=1, start=None, padding=0, layer_attr=None, + stride=1, padding=0, layer_attr=None, pool_size_y=None, stride_y=None, padding_y=None, img_width=None): """ @@ -1661,8 +1661,6 @@ def img_pool_layer(input, pool_size, name=None, :type stride: int :param stride_y: stride height of pooling. It is equal to stride by default. :type stride_y: int|None - :param start: start position of pooling operation. Note it is deprecated now. - :type start: int|None :param layer_attr: Extra Layer attribute. :type layer_attr: ExtraLayerAttribute :param img_width: the width of input feature map. 
If it is None, the input feature @@ -1696,7 +1694,7 @@ def img_pool_layer(input, pool_size, name=None, pool_type=type_name, channels=num_channels, size_x=pool_size, - start=start, + start=None, stride=stride, padding=padding, size_y=pool_size_y, diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py index d8f96195020b4..65512b327cdc6 100644 --- a/python/paddle/trainer_config_helpers/networks.py +++ b/python/paddle/trainer_config_helpers/networks.py @@ -133,7 +133,7 @@ def simple_img_conv_pool(input, filter_size, num_filters, pool_size, name=None, pool_type=None, act=None, groups=1, conv_stride=1, conv_padding=0, bias_attr=None, num_channel=None, param_attr=None, shared_bias=True, - conv_layer_attr=None, pool_stride=1, pool_start=None, + conv_layer_attr=None, pool_stride=1, pool_padding=0, pool_layer_attr=None): """ Simple image convolution and pooling group. @@ -172,8 +172,6 @@ def simple_img_conv_pool(input, filter_size, num_filters, pool_size, name=None, :type conv_layer_attr: ExtraLayerAttribute :param pool_stride: see img_pool_layer for details :type pool_stride: int - :param pool_start: see img_pool_layer for details. It is deprecated now. - :type pool_start: int :param pool_padding: see img_pool_layer for details :type pool_padding: int :param pool_layer_attr: see img_pool_layer for details @@ -192,7 +190,7 @@ def simple_img_conv_pool(input, filter_size, num_filters, pool_size, name=None, return img_pool_layer(name="%s_pool" % name, input=_conv_, pool_size=pool_size, pool_type=pool_type, stride=pool_stride, - start=pool_start, padding=pool_padding, + padding=pool_padding, layer_attr=pool_layer_attr) @@ -203,7 +201,7 @@ def img_conv_bn_pool(input, filter_size, num_filters, pool_size, name=None, conv_param_attr=None, shared_bias=True, conv_layer_attr=None, bn_param_attr=None, bn_bias_attr=None, bn_layer_attr=None, pool_stride=1, - pool_start=None, pool_padding=0, pool_layer_attr=None): + pool_padding=0, pool_layer_attr=None): """ Convolution, batch normalization, pooling group. @@ -243,8 +241,6 @@ def img_conv_bn_pool(input, filter_size, num_filters, pool_size, name=None, :param bn_layer_attr: ParameterAttribute. :param pool_stride: see img_pool_layer's document. :type pool_stride: int - :param pool_start: see img_pool_layer's document. It is deprecated now. - :type pool_start: int :param pool_padding: see img_pool_layer's document. :type pool_padding: int :param pool_layer_attr: see img_pool_layer's document. 
@@ -268,7 +264,7 @@ def img_conv_bn_pool(input, filter_size, num_filters, pool_size, name=None, return img_pool_layer(name="%s_pool" % name, input=__bn__, pool_type=pool_type, pool_size=pool_size, stride=pool_stride, - start=pool_start, padding=pool_padding, + padding=pool_padding, layer_attr=pool_layer_attr) From 07b2e5d54c61956eee65ddda7c0f704cb89f7b2a Mon Sep 17 00:00:00 2001 From: Z-TAO Date: Tue, 25 Oct 2016 05:04:36 +0800 Subject: [PATCH 043/180] Fix dataprovider converter for sparse data --- paddle/py_paddle/dataprovider_converter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index 0366bb636c704..6d8f5da3e298f 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -63,7 +63,8 @@ def __init__(self, input_type, pos): def scan(self, dat): self.extend_cols(dat) - self.__rows__.append(len(dat)) + self.__rows__.append(len(dat) + self.__rows__[-1]) + self.__height__ += 1 def extend_cols(self, dat): self.__cols__.extend(dat) From 9c5c38fa2ab67fb09c0100637da5f5efbe5e71d5 Mon Sep 17 00:00:00 2001 From: gangliao Date: Mon, 24 Oct 2016 22:14:34 -0700 Subject: [PATCH 044/180] FIx check type unmatch in MaxOutLayer (#242) Compiled failed on gcc 4.6 --- paddle/gserver/layers/MaxOutLayer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/gserver/layers/MaxOutLayer.cpp b/paddle/gserver/layers/MaxOutLayer.cpp index 106ab26ba1aae..a3de069bf7a6c 100644 --- a/paddle/gserver/layers/MaxOutLayer.cpp +++ b/paddle/gserver/layers/MaxOutLayer.cpp @@ -46,7 +46,7 @@ bool MaxOutLayer::init(const LayerMap& layerMap, Layer::init(layerMap, parameterMap); /* the size of inputs for maxout-layer is 1 */ - CHECK_EQ(config_.inputs_size(), 1UL); + CHECK_EQ(config_.inputs_size(), 1); const MaxOutConfig& conf = config_.inputs(0).maxout_conf(); groups_ = conf.groups(); From d6944dec16933a48396e4dc03e13a365cba60fb8 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Tue, 25 Oct 2016 18:40:05 -0700 Subject: [PATCH 045/180] Sequence tagging demo (#225) --- demo/sequence_tagging/data/get_data.sh | 21 ++ demo/sequence_tagging/data/test.list | 1 + demo/sequence_tagging/data/train.list | 1 + demo/sequence_tagging/dataprovider.py | 258 ++++++++++++++++++ demo/sequence_tagging/linear_crf.py | 84 ++++++ demo/sequence_tagging/readme.md | 45 +++ demo/sequence_tagging/rnn_crf.py | 130 +++++++++ demo/sequence_tagging/train.sh | 10 + demo/sequence_tagging/train_linear.sh | 9 + .../trainer_config_helpers/optimizers.py | 15 +- 10 files changed, 572 insertions(+), 2 deletions(-) create mode 100755 demo/sequence_tagging/data/get_data.sh create mode 100644 demo/sequence_tagging/data/test.list create mode 100644 demo/sequence_tagging/data/train.list create mode 100644 demo/sequence_tagging/dataprovider.py create mode 100644 demo/sequence_tagging/linear_crf.py create mode 100644 demo/sequence_tagging/readme.md create mode 100644 demo/sequence_tagging/rnn_crf.py create mode 100755 demo/sequence_tagging/train.sh create mode 100755 demo/sequence_tagging/train_linear.sh diff --git a/demo/sequence_tagging/data/get_data.sh b/demo/sequence_tagging/data/get_data.sh new file mode 100755 index 0000000000000..e579d6c46ce5e --- /dev/null +++ b/demo/sequence_tagging/data/get_data.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +DIR="$( cd "$(dirname "$0")" ; pwd -P )" +cd $DIR + +wget http://www.cnts.ua.ac.be/conll2000/chunking/train.txt.gz +wget http://www.cnts.ua.ac.be/conll2000/chunking/test.txt.gz diff --git a/demo/sequence_tagging/data/test.list b/demo/sequence_tagging/data/test.list new file mode 100644 index 0000000000000..073c0a0c9063a --- /dev/null +++ b/demo/sequence_tagging/data/test.list @@ -0,0 +1 @@ +data/test.txt.gz diff --git a/demo/sequence_tagging/data/train.list b/demo/sequence_tagging/data/train.list new file mode 100644 index 0000000000000..43c24d5f6484a --- /dev/null +++ b/demo/sequence_tagging/data/train.list @@ -0,0 +1 @@ +data/train.txt.gz diff --git a/demo/sequence_tagging/dataprovider.py b/demo/sequence_tagging/dataprovider.py new file mode 100644 index 0000000000000..6f412d6834be6 --- /dev/null +++ b/demo/sequence_tagging/dataprovider.py @@ -0,0 +1,258 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer.PyDataProvider2 import * +import gzip +import logging + +logging.basicConfig( + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', +) +logger = logging.getLogger('paddle') +logger.setLevel(logging.INFO) + +OOV_POLICY_IGNORE = 0 +OOV_POLICY_USE = 1 +OOV_POLICY_ERROR = 2 + +num_original_columns = 3 + +# Feature combination patterns. +# [[-1,0], [0,0]] means previous token at column 0 and current token at +# column 0 are combined as one feature. 
+patterns = [ + [[-2,0]], + [[-1,0]], + [[0,0]], + [[1,0]], + [[2,0]], + + [[-1,0], [0,0]], + [[0,0], [1,0]], + + [[-2,1]], + [[-1,1]], + [[0,1]], + [[1,1]], + [[2,1]], + [[-2,1], [-1,1]], + [[-1,1], [0,1]], + [[0,1], [1,1]], + [[1,1], [2,1]], + + [[-2,1], [-1,1], [0,1]], + [[-1,1], [0,1], [1,1]], + [[0,1], [1,1], [2,1]], +] + +dict_label = { + 'B-ADJP': 0, + 'I-ADJP': 1, + 'B-ADVP': 2, + 'I-ADVP': 3, + 'B-CONJP': 4, + 'I-CONJP': 5, + 'B-INTJ': 6, + 'I-INTJ': 7, + 'B-LST': 8, + 'I-LST': 9, + 'B-NP': 10, + 'I-NP': 11, + 'B-PP': 12, + 'I-PP': 13, + 'B-PRT': 14, + 'I-PRT': 15, + 'B-SBAR': 16, + 'I-SBAR': 17, + 'B-UCP': 18, + 'I-UCP': 19, + 'B-VP': 20, + 'I-VP': 21, + 'O': 22 +} + +def make_features(sequence): + length = len(sequence) + num_features = len(sequence[0]) + def get_features(pos): + if pos < 0: + return ['#B%s' % -pos] * num_features + if pos >= length: + return ['#E%s' % (pos - length + 1)] * num_features + return sequence[pos] + + for i in xrange(length): + for pattern in patterns: + fname = '/'.join([get_features(i+pos)[f] for pos, f in pattern]) + sequence[i].append(fname) + +''' +Source file format: +Each line is for one timestep. The features are separated by space. +An empty line indicates end of a sequence. + +cutoff: a list of numbers. If count of a feature is smaller than this, + it will be ignored. +if oov_policy[i] is OOV_POLICY_USE, id 0 is reserved for OOV features of +i-th column. + +return a list of dict for each column +''' +def create_dictionaries(filename, cutoff, oov_policy): + def add_to_dict(sequence, dicts): + num_features = len(dicts) + for features in sequence: + l = len(features) + assert l == num_features, "Wrong number of features " + line + for i in xrange(l): + if features[i] in dicts[i]: + dicts[i][features[i]] += 1 + else: + dicts[i][features[i]] = 1 + + num_features = len(cutoff) + dicts = [] + for i in xrange(num_features): + dicts.append(dict()) + + f = gzip.open(filename, 'rb') + + sequence = [] + + for line in f: + line = line.strip() + if not line: + make_features(sequence) + add_to_dict(sequence, dicts) + sequence = [] + continue + features = line.split(' ') + sequence.append(features) + + + for i in xrange(num_features): + dct = dicts[i] + n = 1 if oov_policy[i] == OOV_POLICY_USE else 0 + todo = [] + for k, v in dct.iteritems(): + if v < cutoff[i]: + todo.append(k) + else: + dct[k] = n + n += 1 + + if oov_policy[i] == OOV_POLICY_USE: + # placeholder so that len(dct) will be the number of features + # including OOV + dct['#OOV#'] = 0 + + logger.info('column %d dict size=%d, ignored %d' % (i, n, len(todo))) + for k in todo: + del dct[k] + + f.close() + return dicts + + +def initializer(settings, **xargs): + cutoff = [3, 1, 0] + cutoff += [3] * len(patterns) + oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR] + oov_policy += [OOV_POLICY_IGNORE] * len(patterns) + dicts = create_dictionaries('data/train.txt.gz', cutoff, oov_policy) + dicts[2] = dict_label + settings.dicts = dicts + settings.oov_policy = oov_policy + input_types = [] + num_features = len(dicts) + for i in xrange(num_original_columns): + input_types.append(integer_sequence(len(dicts[i]))) + logger.info("slot %s size=%s" % (i, len(dicts[i]))) + if patterns: + dim = 0 + for i in xrange(num_original_columns, num_features): + dim += len(dicts[i]) + input_types.append(sparse_binary_vector_sequence(dim)) + logger.info("feature size=%s" % dim) + settings.input_types = input_types + +''' +if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not +existed 
in dicts[i] will be assigned to id 0. +if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist +in dicts[i]. +''' +@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) +def process(settings, filename): + input_file = filename + dicts = settings.dicts + oov_policy = settings.oov_policy + + def gen_sample(sequence): + num_features = len(dicts) + sample = [list() for i in xrange(num_original_columns)] + if patterns: + sample.append([]) + for features in sequence: + assert len(features) == num_features, \ + "Wrong number of features: " + line + for i in xrange(num_original_columns): + id = dicts[i].get(features[i], -1) + if id != -1: + sample[i].append(id) + elif oov_policy[i] == OOV_POLICY_IGNORE: + sample[i].append(0xffffffff) + elif oov_policy[i] == OOV_POLICY_ERROR: + logger.fatal("Unknown token: %s" % features[i]) + else: + sample[i].append(0) + + if patterns: + dim = 0 + vec = [] + for i in xrange(num_original_columns, num_features): + id = dicts[i].get(features[i], -1) + if id != -1: + vec.append(dim + id) + elif oov_policy[i] == OOV_POLICY_IGNORE: + pass + elif oov_policy[i] == OOV_POLICY_ERROR: + logger.fatal("Unknown token: %s" % features[i]) + else: + vec.ids.append(dim + 0) + + dim += len(dicts[i]) + sample[-1].append(vec) + return sample + + num_features = len(dicts) + f = gzip.open(input_file, 'rb') + + num_sequences = 0 + sequence = [] + for line in f: + line = line.strip() + if not line: + make_features(sequence) + yield gen_sample(sequence) + sequence = [] + num_sequences += 1 + continue + features = line.split(' ') + sequence.append(features) + + f.close() + + logger.info("num_sequences=%s" % num_sequences) + diff --git a/demo/sequence_tagging/linear_crf.py b/demo/sequence_tagging/linear_crf.py new file mode 100644 index 0000000000000..2bd1a20bc52fc --- /dev/null +++ b/demo/sequence_tagging/linear_crf.py @@ -0,0 +1,84 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +import math + +define_py_data_sources2(train_list="data/train.list", + test_list="data/test.list", + module="dataprovider", + obj="process") + + +batch_size = 1 +settings( + learning_method=MomentumOptimizer(), + batch_size=batch_size, + regularization=L2Regularization(batch_size * 1e-4), + average_window=0.5, + learning_rate=1e-1, + learning_rate_decay_a=1e-5, + learning_rate_decay_b=0.25, +) + +num_label_types=23 + +def get_simd_size(size): + return int(math.ceil(float(size) / 8)) * 8 + +# Currently, in order to use sparse_update=True, +# the size has to be aligned. 
+num_label_types = get_simd_size(num_label_types) + +features = data_layer(name="features", size=76328) +word = data_layer(name="word", size=6778) +pos = data_layer(name="pos", size=44) +chunk = data_layer(name="chunk", + size=num_label_types) + +crf_input = fc_layer( + input=features, + size=num_label_types, + act=LinearActivation(), + bias_attr=False, + param_attr=ParamAttr(initial_std=0, sparse_update=True)) + +crf=crf_layer( + input=crf_input, + label=chunk, + param_attr=ParamAttr(name="crfw", initial_std=0), +) + +crf_decoding=crf_decoding_layer( + size=num_label_types, + input=crf_input, + label=chunk, + param_attr=ParamAttr(name="crfw"), +) + +sum_evaluator( + name="error", + input=crf_decoding, +) + +chunk_evaluator( + name="chunk_f1", + input =[crf_decoding, chunk], + chunk_scheme="IOB", + num_chunk_types=11, +) + +inputs(word, pos, chunk, features) +outputs(crf) diff --git a/demo/sequence_tagging/readme.md b/demo/sequence_tagging/readme.md new file mode 100644 index 0000000000000..2e17fffb83c53 --- /dev/null +++ b/demo/sequence_tagging/readme.md @@ -0,0 +1,45 @@ +# Sequence Tagging + +This demo is a sequence model for assigning tags to each token in a sentence. The task is described in the CONLL2000 Text Chunking task. + +## Download data +```bash +cd demo/sequence_tagging +./data/get_data.sh +``` + +## Train model +```bash +cd demo/sequence_tagging +./train.sh +``` + +## Model description + +We provide two models. One is a linear CRF model (linear_crf.py) which is equivalent to the one at leon.bottou.org/projects/sgd. The second one is a stacked bidirectional RNN and CRF model (rnn_crf.py). +
+| Model name | Number of parameters | F1 score |
+|------------|----------------------|----------|
+| linear_crf | 1.8M                 | 0.937    |
+| rnn_crf    | 960K                 | 0.941    |
+
diff --git a/demo/sequence_tagging/rnn_crf.py b/demo/sequence_tagging/rnn_crf.py new file mode 100644 index 0000000000000..fb157bf3ea719 --- /dev/null +++ b/demo/sequence_tagging/rnn_crf.py @@ -0,0 +1,130 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +import math + +define_py_data_sources2(train_list="data/train.list", + test_list="data/test.list", + module="dataprovider", + obj="process") + +batch_size = 16 +settings( + learning_method=MomentumOptimizer(), + batch_size=batch_size, + regularization=L2Regularization(batch_size * 1e-5), + average_window=0.5, + learning_rate = 2e-3, + learning_rate_decay_a = 5e-7, + learning_rate_decay_b = 0.5, +) + +word_dim=128 +hidden_dim = 128 +with_rnn = True + +initial_std=1/math.sqrt(hidden_dim) +param_attr=ParamAttr(initial_std=initial_std) +cpu_layer_attr=ExtraLayerAttribute(device=-1) + +default_device(0) + +num_label_types=23 + +features = data_layer(name="features", size=76328) +word = data_layer(name="word", size=6778) +pos = data_layer(name="pos", size=44) +chunk = data_layer(name="chunk", + size=num_label_types, + layer_attr=cpu_layer_attr) + +emb = embedding_layer( + input=word, size=word_dim, param_attr=ParamAttr(initial_std=0)) + +hidden1 = mixed_layer( + size=hidden_dim, + act=STanhActivation(), + bias_attr=True, + input=[full_matrix_projection(emb), + table_projection(pos, param_attr=param_attr)] +) + +if with_rnn: + rnn1 = recurrent_layer( + act=ReluActivation(), + bias_attr=True, + input=hidden1, + param_attr=ParamAttr(initial_std=0), + ) + +hidden2 = mixed_layer( + size=hidden_dim, + act=STanhActivation(), + bias_attr=True, + input=[full_matrix_projection(hidden1) + ] + ([ + full_matrix_projection(rnn1, param_attr=ParamAttr(initial_std=0)) + ] if with_rnn else []), +) + +if with_rnn: + rnn2=recurrent_layer( + reverse=True, + act=ReluActivation(), + bias_attr=True, + input=hidden2, + param_attr=ParamAttr(initial_std=0), + ) + +crf_input = mixed_layer( + size=num_label_types, + bias_attr=False, + input=[ + full_matrix_projection(hidden2), + ] + ([ + full_matrix_projection(rnn2, param_attr=ParamAttr(initial_std=0)) + ] if with_rnn else []), +) + +crf = crf_layer( + input=crf_input, + label=chunk, + param_attr=ParamAttr(name="crfw", initial_std=0), + layer_attr=cpu_layer_attr, +) + +crf_decoding = crf_decoding_layer( + size=num_label_types, + input=crf_input, + label=chunk, + param_attr=ParamAttr(name="crfw"), + layer_attr=cpu_layer_attr, +) + +sum_evaluator( + name="error", + input=crf_decoding, +) + +chunk_evaluator( + name="chunk_f1", + input =[crf_decoding, chunk], + chunk_scheme="IOB", + num_chunk_types=11, +) + +inputs(word, pos, chunk, features) +outputs(crf) diff --git a/demo/sequence_tagging/train.sh b/demo/sequence_tagging/train.sh new file mode 100755 index 0000000000000..9a706b98d8686 --- /dev/null +++ b/demo/sequence_tagging/train.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +paddle train \ + --config rnn_crf.py \ + --parallel_nn=1 \ + 
--use_gpu=1 \ + --dot_period=10 \ + --log_period=1000 \ + --test_period=0 \ + --num_passes=10 diff --git a/demo/sequence_tagging/train_linear.sh b/demo/sequence_tagging/train_linear.sh new file mode 100755 index 0000000000000..597b5afea9c63 --- /dev/null +++ b/demo/sequence_tagging/train_linear.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +paddle train \ + --config linear_crf.py \ + --use_gpu=0 \ + --dot_period=100 \ + --log_period=10000 \ + --test_period=0 \ + --num_passes=10 diff --git a/python/paddle/trainer_config_helpers/optimizers.py b/python/paddle/trainer_config_helpers/optimizers.py index 4660a6b5003da..d4b947517b7d0 100644 --- a/python/paddle/trainer_config_helpers/optimizers.py +++ b/python/paddle/trainer_config_helpers/optimizers.py @@ -362,6 +362,13 @@ def __extends__(dict1, dict2): default_factory=lambda _: BaseRegularization()) def settings(batch_size, learning_rate=1e-3, + learning_rate_decay_a=0., + learning_rate_decay_b=0., + learning_rate_schedule='poly', + learning_rate_args='', + average_window=0, + do_average_in_cpu=False, + max_average_window=None, learning_method=None, regularization=None, is_async=False, @@ -408,10 +415,14 @@ def settings(batch_size, else: algorithm = 'owlqn' + args=['batch_size', 'learning_rate', 'learning_rate_decay_a', + 'learning_rate_decay_b', 'learning_rate_schedule', + 'learning_rate_args', 'average_window', 'do_average_in_cpu', + 'max_average_window'] kwargs = dict() - kwargs['batch_size'] = batch_size - kwargs['learning_rate'] = learning_rate kwargs['algorithm'] = algorithm + for arg in args: + kwargs[arg] = locals()[arg] kwargs = __extends__(kwargs, learning_method.to_setting_kwargs()) learning_method.extra_settings() From d1d52bb7d4c1f9ad4703aeb17027135bcfc41a7d Mon Sep 17 00:00:00 2001 From: emailweixu Date: Tue, 25 Oct 2016 18:49:40 -0700 Subject: [PATCH 046/180] Update contribute_to_paddle.md (#248) --- doc/build/contribute_to_paddle.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/build/contribute_to_paddle.md b/doc/build/contribute_to_paddle.md index 06fcff6172075..bbdbb4d4227d0 100644 --- a/doc/build/contribute_to_paddle.md +++ b/doc/build/contribute_to_paddle.md @@ -99,3 +99,7 @@ git pull --rebase upstream HEAD git push -f origin HEAD ``` Now your Pull Request is updated with the latest version. + +## Revise your pull request + +When you revise your pull request according to the reviewers' comments, please use 'git commit' instead of 'git commit --amend' to commit your changes so that the reviewers can see the difference between the new pull request and the old pull request.
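As an illustration of the revision workflow described in the patch above, the steps might look like the following shell session. This is a minimal sketch and not part of the patch itself; the commit message is a placeholder.

```bash
# Edit the files that the review comments refer to, then stage them.
git add -u

# Record the changes as a new commit (plain `git commit`, not
# `git commit --amend`), so reviewers can diff this revision against
# the one they already reviewed.
git commit -m "Address review comments"

# Push the new commit to the branch that backs the pull request.
# No force push is needed because no history was rewritten.
git push origin HEAD
```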
From 46bd5f53e3e536e712f3ee36c700834206db7e7a Mon Sep 17 00:00:00 2001 From: backyes Date: Wed, 26 Oct 2016 15:43:15 +0800 Subject: [PATCH 047/180] add input sparse data check for sparse layer at runtime (#247) * add input sparse data check for sparse layer at runtime, to avoid invalid data access at pserver end while doing prefetch * remote sparse design support binary sparse and float saprse both --- paddle/math/SparseRowMatrix.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/paddle/math/SparseRowMatrix.cpp b/paddle/math/SparseRowMatrix.cpp index 0b5de252258a9..6986624d25c7a 100644 --- a/paddle/math/SparseRowMatrix.cpp +++ b/paddle/math/SparseRowMatrix.cpp @@ -227,12 +227,18 @@ void CacheRowCpuMatrix::mul(CpuSparseMatrix* a, CpuMatrix* b, real scaleAB, void SparsePrefetchRowCpuMatrix::addRows(const unsigned int* ids, size_t len) { std::vector& localIndices = indexDictHandle_->localIndices; + for (size_t i = 0; i < len; i ++) { + CHECK_LT(*(ids + i), this->getHeight()) + << "id:" << *(ids + i) << "Height:" << this->getHeight() + << "sparse id value exceeds the max input dimension, " + << "it could be caused invalid input data samples"; + } localIndices.insert(localIndices.end(), ids, ids + len); } void SparsePrefetchRowCpuMatrix::addRows(MatrixPtr input) { CpuSparseMatrix* mat = dynamic_cast(input.get()); - CHECK(mat) << "only support non value sparse matrix"; + CHECK(mat) << "only support sparse matrix"; addRows(reinterpret_cast(mat->getCols()), mat->getElementCnt()); } @@ -243,7 +249,13 @@ void SparsePrefetchRowCpuMatrix::addRows(IVectorPtr ids) { int* index = ids->getData(); for (size_t i = 0; i < numSamples; ++i) { if (index[i] == -1) continue; - localIndices.push_back((unsigned int)index[i]); + + unsigned int id = (unsigned int)index[i]; + CHECK_LT(id, this->getHeight()) + << "id:" << id << "Height:" << this->getHeight() + << "sparse id value exceeds the max input dimension, " + << "it could be caused invalid input data samples"; + localIndices.push_back(id); } } From cbe734b396a6f6844cafb94b0de8bd736360ef90 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Thu, 27 Oct 2016 00:23:46 -0700 Subject: [PATCH 048/180] Python trainer api (#193) * Python trainer API and demo * Adding missing PaddleAPIPrivate.h * Adding api_train.sh * More comments * Bump up patch version to 0b3 --- CMakeLists.txt | 2 +- cmake/swig.cmake | 1 + demo/quick_start/api_train.py | 114 +++++++++++ demo/quick_start/api_train.sh | 29 +++ demo/quick_start/train.sh | 2 +- demo/quick_start/trainer_config.lr.py | 3 +- demo/sentiment/predict.py | 4 +- paddle/api/Arguments.cpp | 19 +- paddle/api/CMakeLists.txt | 3 + paddle/api/ConfigParser.cpp | 44 ++--- paddle/api/GradientMachine.cpp | 18 +- paddle/api/PaddleAPI.h | 40 ++-- paddle/api/PaddleAPIPrivate.h | 68 +++++++ paddle/api/ParameterOptimizer.cpp | 4 +- paddle/api/Trainer.cpp | 175 +++++++++-------- paddle/api/test/run_tests.sh | 2 +- paddle/api/test/testTrain.py | 3 +- paddle/api/test/testTrainer.py | 63 +++++++ paddle/py_paddle/dataprovider_converter.py | 2 +- paddle/py_paddle/util.py | 107 +++++++++-- paddle/trainer/Tester.cpp | 56 ++++-- paddle/trainer/Tester.h | 12 +- paddle/trainer/Trainer.cpp | 208 ++++++++++++--------- paddle/trainer/Trainer.h | 22 ++- paddle/trainer/TrainerInternal.cpp | 10 +- paddle/trainer/TrainerInternal.h | 4 +- proto/TrainerConfig.proto.m4 | 2 +- python/paddle/proto/__init__.py | 2 + 28 files changed, 707 insertions(+), 312 deletions(-) create mode 100644 demo/quick_start/api_train.py create mode 
100755 demo/quick_start/api_train.sh create mode 100644 paddle/api/PaddleAPIPrivate.h create mode 100644 paddle/api/test/testTrainer.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 4613155f7700b..e96ce28248ee5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 2.8) project(paddle CXX C) set(PADDLE_MAJOR_VERSION 0) set(PADDLE_MINOR_VERSION 8) -set(PADDLE_PATCH_VERSION 0b2) +set(PADDLE_PATCH_VERSION 0b3) set(PADDLE_VERSION ${PADDLE_MAJOR_VERSION}.${PADDLE_MINOR_VERSION}.${PADDLE_PATCH_VERSION}) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") diff --git a/cmake/swig.cmake b/cmake/swig.cmake index f5c1bcc79b3dc..160d7ee56a9c6 100644 --- a/cmake/swig.cmake +++ b/cmake/swig.cmake @@ -27,6 +27,7 @@ function(generate_python_api target_name) COMMAND swig -python -c++ -outcurrentdir -I../ api/Paddle.swig && mv ${PROJ_ROOT}/paddle/swig_paddle.py ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py DEPENDS ${PROJ_ROOT}/paddle/api/Paddle.swig + ${PROJ_ROOT}/paddle/api/PaddleAPI.h WORKING_DIRECTORY ${PROJ_ROOT}/paddle COMMENT "Generate Python API from swig") add_custom_target(${target_name} ALL DEPENDS diff --git a/demo/quick_start/api_train.py b/demo/quick_start/api_train.py new file mode 100644 index 0000000000000..5ae19b8d26534 --- /dev/null +++ b/demo/quick_start/api_train.py @@ -0,0 +1,114 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import itertools +import random + +from paddle.trainer.config_parser import parse_config +from py_paddle import swig_paddle as api +from py_paddle import DataProviderConverter +from paddle.trainer.PyDataProvider2 \ + import integer_value, integer_value_sequence, sparse_binary_vector + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument("--train_data", + type=str, required=False, help="train data file") + parser.add_argument("--test_data", type=str, help="test data file") + parser.add_argument("--config", + type=str, required=True, help="config file name") + parser.add_argument("--dict_file", required=True, help="dictionary file") + parser.add_argument("--seq", + default=1, type=int, + help="whether use sequence training") + parser.add_argument("--use_gpu", default=0, type=int, + help="whether use GPU for training") + parser.add_argument("--trainer_count", default=1, type=int, + help="Number of threads for training") + parser.add_argument("--num_passes", default=5, type=int, + help="Number of training passes") + return parser.parse_args() + +UNK_IDX = 0 + +def load_data(file_name, word_dict): + with open(file_name, 'r') as f: + for line in f: + label, comment = line.strip().split('\t') + words = comment.split() + word_slot = [word_dict.get(w, UNK_IDX) for w in words] + yield word_slot, int(label) + +def load_dict(dict_file): + word_dict = dict() + with open(dict_file, 'r') as f: + for i, line in enumerate(f): + w = line.strip().split()[0] + word_dict[w] = i + return word_dict + +def main(): + options = parse_arguments() + api.initPaddle("--use_gpu=%s" % options.use_gpu, + "--trainer_count=%s" % options.trainer_count) + + word_dict = load_dict(options.dict_file) + train_dataset = list(load_data(options.train_data, word_dict)) + if options.test_data: + test_dataset = list(load_data(options.test_data, word_dict)) + else: + test_dataset = None + + trainer_config = parse_config(options.config, + "dict_file=%s" % options.dict_file) + # No need to have data provider for trainer + trainer_config.ClearField('data_config') + trainer_config.ClearField('test_data_config') + + # create a GradientMachine from the model configuratin + model = api.GradientMachine.createFromConfigProto( + trainer_config.model_config) + # create a trainer for the gradient machine + trainer = api.Trainer.create(trainer_config, model) + + # create a data converter which converts data to PaddlePaddle + # internal format + input_types = [ + integer_value_sequence(len(word_dict)) if options.seq + else sparse_binary_vector(len(word_dict)), + integer_value(2)] + converter = DataProviderConverter(input_types) + + batch_size = trainer_config.opt_config.batch_size + trainer.startTrain() + for train_pass in xrange(options.num_passes): + trainer.startTrainPass() + random.shuffle(train_dataset) + for pos in xrange(0, len(train_dataset), batch_size): + batch = itertools.islice(train_dataset, pos, pos + batch_size) + size = min(batch_size, len(train_dataset) - pos) + trainer.trainOneDataBatch(size, converter(batch)) + trainer.finishTrainPass() + if test_dataset: + trainer.startTestPeriod(); + for pos in xrange(0, len(test_dataset), batch_size): + batch = itertools.islice(test_dataset, pos, pos + batch_size) + size = min(batch_size, len(test_dataset) - pos) + trainer.testOneDataBatch(size, converter(batch)) + trainer.finishTestPeriod() + trainer.finishTrain() + +if __name__ == '__main__': + main() diff --git a/demo/quick_start/api_train.sh b/demo/quick_start/api_train.sh new file mode 
100755 index 0000000000000..40e9d0a09aaa6 --- /dev/null +++ b/demo/quick_start/api_train.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +# Note: if using trainer_config.emb.py, trainer_config.cnn.py +# or trainer_config.lstm.py, you need to change --seq to --seq=1 +# because they are sequence models. +python api_train.py \ + --config=trainer_config.lr.py \ + --trainer_count=2 \ + --num_passes=15 \ + --use_gpu=0 \ + --seq=0 \ + --train_data=data/train.txt \ + --test_data=data/test.txt \ + --dict_file=data/dict.txt \ + 2>&1 | tee 'train.log' diff --git a/demo/quick_start/train.sh b/demo/quick_start/train.sh index ea4e32249a3d0..49806292a4ec5 100755 --- a/demo/quick_start/train.sh +++ b/demo/quick_start/train.sh @@ -24,7 +24,7 @@ paddle train \ --config=$cfg \ --save_dir=./output \ --trainer_count=4 \ - --log_period=20 \ + --log_period=100 \ --num_passes=15 \ --use_gpu=false \ --show_parameter_stats_period=100 \ diff --git a/demo/quick_start/trainer_config.lr.py b/demo/quick_start/trainer_config.lr.py index 119e3849a4b7e..c6059947f30b3 100644 --- a/demo/quick_start/trainer_config.lr.py +++ b/demo/quick_start/trainer_config.lr.py @@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import * -dict_file = "./data/dict.txt" +dict_file = get_config_arg('dict_file', str, "./data/dict.txt") word_dict = dict() with open(dict_file, 'r') as f: for i, line in enumerate(f): @@ -63,7 +63,6 @@ label = data_layer(name="label", size=2) # Define cross-entropy classification loss and error. - classification_cost(input=output, label=label) cls = classification_cost(input=output, label=label) outputs(cls) else: diff --git a/demo/sentiment/predict.py b/demo/sentiment/predict.py index c61628d34db4a..7d0baeabbba68 100755 --- a/demo/sentiment/predict.py +++ b/demo/sentiment/predict.py @@ -46,8 +46,8 @@ def __init__(self, train_conf, dict_file, model_dir=None, label_file = None): conf = parse_config(train_conf, "is_predict=1") self.network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) self.network.loadParameters(self.model_dir) - slots = [integer_value_sequence(self.dict_dim)] - self.converter = DataProviderConverter(slots) + input_types = [integer_value_sequence(self.dict_dim)] + self.converter = DataProviderConverter(input_types) def load_dict(self): """ diff --git a/paddle/api/Arguments.cpp b/paddle/api/Arguments.cpp index 8f73e7626042c..6f51d55120069 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/api/Arguments.cpp @@ -14,27 +14,10 @@ limitations under the License. 
*/ #include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" #include "paddle/parameter/Argument.h" -struct ArgumentsPrivate { - std::vector outputs; - - inline paddle::Argument& getArg(size_t idx) throw(RangeError) { - if (idx < outputs.size()) { - return outputs[idx]; - } else { - RangeError e; - throw e; - } - } - - template - std::shared_ptr& cast(void* rawPtr) const { - return *(std::shared_ptr*)(rawPtr); - } -}; - size_t Arguments::getSlotNum() const { return m->outputs.size(); } Arguments* Arguments::createArguments(size_t slotNum) { diff --git a/paddle/api/CMakeLists.txt b/paddle/api/CMakeLists.txt index fe0da763514a6..9b2d122a09ada 100644 --- a/paddle/api/CMakeLists.txt +++ b/paddle/api/CMakeLists.txt @@ -40,6 +40,8 @@ configure_file( generate_python_api(python_swig_sources) +file(GLOB PY_PADDLE_PYTHON_FILES ${PROJ_ROOT}/paddle/py_paddle/*.py) + # TODO(yuyang18) : make wheel name calculated by cmake add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp COMMAND ${PYTHON_EXECUTABLE} setup.py bdist_wheel @@ -55,6 +57,7 @@ add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/dist/.timestamp paddle_trainer paddle_api paddle_cuda + ${PY_PADDLE_PYTHON_FILES} ) install(DIRECTORY ${PROJ_ROOT}/paddle/dist/ diff --git a/paddle/api/ConfigParser.cpp b/paddle/api/ConfigParser.cpp index c5ee784a0bda0..25d94f5a6a125 100644 --- a/paddle/api/ConfigParser.cpp +++ b/paddle/api/ConfigParser.cpp @@ -14,17 +14,9 @@ limitations under the License. */ #include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" #include "paddle/trainer/Trainer.h" -struct TrainerConfigPrivate { - std::shared_ptr conf; - TrainerConfigPrivate() : conf(std::make_shared()) {} -}; - -struct ModelConfigPrivate { - std::shared_ptr conf; -}; - struct ParameterConfigPrivate { paddle::ParameterPtr parameter; paddle::ParameterConfig config; @@ -39,19 +31,6 @@ struct ParameterConfigPrivate { } }; -struct OptimizationConfigPrivate { - std::shared_ptr trainer_config; - paddle::OptimizationConfig config; - - paddle::OptimizationConfig& getConfig() { - if (trainer_config != nullptr) { - return *trainer_config->mutable_opt_config(); - } else { - return config; - } - } -}; - TrainerConfig::TrainerConfig() : m(new TrainerConfigPrivate()) {} TrainerConfig::~TrainerConfig() { delete m; } @@ -59,10 +38,19 @@ TrainerConfig::~TrainerConfig() { delete m; } TrainerConfig* TrainerConfig::createFromTrainerConfigFile( const std::string& confPath) { LOG(INFO) << "load trainer config from " << confPath; - paddle::TrainerConfigHelper helper(confPath); - //! 
TODO(yuyang18): Make TrainerConfigPrivate to TrainerConfigHelper + auto conf = std::make_shared(confPath); auto retv = new TrainerConfig(); - *retv->m->conf = helper.getConfig(); + retv->m->conf = conf; + return retv; +} + +TrainerConfig* TrainerConfig::createFromProtoString( + const std::string& str) { + auto retv = new TrainerConfig(); + paddle::TrainerConfig trainerConfigProto; + auto conf = std::make_shared(trainerConfigProto); + CHECK(conf->getMutableConfig().ParseFromString(str)); + retv->m->conf = conf; return retv; } @@ -76,10 +64,6 @@ ModelConfig* TrainerConfig::getModelConfig() const { return retv; } -void* ModelConfig::getPaddleModelConfig() const { - return m->conf->mutable_model_config(); -} - ParameterConfig::ParameterConfig() : m(new ParameterConfigPrivate()) {} ParameterConfig::~ParameterConfig() { @@ -132,8 +116,6 @@ OptimizationConfig* TrainerConfig::getOptimizationConfig() const { return opt_config; } -void* OptimizationConfig::getRawPtr() { return &m->getConfig(); } - OptimizationConfig* OptimizationConfig::createFromProtoString( const std::string& str) { auto conf = new OptimizationConfig(); diff --git a/paddle/api/GradientMachine.cpp b/paddle/api/GradientMachine.cpp index 6f1d63575a80f..bef499c67858b 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/api/GradientMachine.cpp @@ -14,30 +14,22 @@ limitations under the License. */ #include "PaddleAPI.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "PaddleAPIPrivate.h" + #include "paddle/gserver/gradientmachines/NeuralNetwork.h" #include "Internal.h" std::vector GradientMachine::defaultParamTypes = { PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM}; -struct GradientMachinePrivate { - std::shared_ptr machine; - - template - inline T& cast(void* ptr) { - return *(T*)(ptr); - } -}; - GradientMachine::GradientMachine() : m(new GradientMachinePrivate()) {} GradientMachine::~GradientMachine() { delete m; } GradientMachine* GradientMachine::createFromPaddleModelPtr( - void* confPtr, GradientMatchineCreateMode mode, + const void* confPtr, GradientMatchineCreateMode mode, const std::vector& types) { - auto& conf = *(paddle::ModelConfig*)(confPtr); + auto& conf = *(const paddle::ModelConfig*)(confPtr); std::vector realTypes; staticCastVector(&realTypes, types); auto machineRawPtr = paddle::GradientMachine::create(conf, mode, realTypes); @@ -66,7 +58,7 @@ GradientMachine* GradientMachine::createByConfigProtoStr( GradientMachine* GradientMachine::createByModelConfig( ModelConfig* conf, GradientMatchineCreateMode mode, const std::vector& types) { - auto confPtr = (paddle::ModelConfig*)conf->getPaddleModelConfig(); + auto confPtr = &conf->m->conf->getModelConfig(); return GradientMachine::createFromPaddleModelPtr(confPtr, mode, types); } diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h index b3140617af188..cf790f2f8ef1d 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/api/PaddleAPI.h @@ -446,7 +446,6 @@ struct OptimizationConfigPrivate; class OptimizationConfig { DISABLE_COPY_AND_ASSIGN(OptimizationConfig); OptimizationConfig(); - void* getRawPtr(); public: static OptimizationConfig* createFromProtoString(const std::string& str); @@ -462,6 +461,7 @@ class OptimizationConfig { friend class TrainerConfig; friend class ParameterOptimizer; + friend class Trainer; }; struct ParameterPrivate; @@ -515,8 +515,6 @@ class ModelConfig { virtual ~ModelConfig(); private: - void* getPaddleModelConfig() const; - ModelConfigPrivate* m; friend class TrainerConfig; friend struct TrainerConfigPrivate; 
@@ -539,6 +537,7 @@ class TrainerConfig { static TrainerConfig* createFromTrainerConfigFile( const std::string& configPath); + static TrainerConfig* createFromProtoString(const std::string& str); ModelConfig* getModelConfig() const; @@ -546,6 +545,7 @@ class TrainerConfig { private: TrainerConfigPrivate* m; + friend class Trainer; }; /** @@ -700,11 +700,12 @@ class GradientMachine { GradientMachinePrivate* m; static GradientMachine* createFromPaddleModelPtr( - void* confPtr, GradientMatchineCreateMode mode, + const void* confPtr, GradientMatchineCreateMode mode, const std::vector& types); // Not to use c++ 11 init-list, so we use static var as function default arg. static std::vector defaultParamTypes; + friend class Trainer; }; struct TrainerPrivate; @@ -712,6 +713,7 @@ class Trainer { private: TrainerPrivate* m; Trainer(); + Trainer(TrainerConfig* optConfig, GradientMachine* gm); DISABLE_COPY_AND_ASSIGN(Trainer); public: @@ -720,38 +722,42 @@ class Trainer { /// Create A Trainer By TrainerConfig. using paddle command line. static Trainer* createByCommandLine() throw(IOError); - /// Start Train. + static Trainer* create(TrainerConfig* optConfig, GradientMachine* gm) + throw(IOError); + + /// Start training void startTrain(); + + /// Finish training void finishTrain(); - /// Start Pass. + /// Start a pass. void startTrainPass(); - void finishTrainPass(); - void setBatchSize(size_t batchSize); + /// Finish a pass + void finishTrainPass(); /** * Train one batch, * - * @param batchSize -1 wiil use command line or batch size set before, - * otherwise use this batchSize for train. - * * @return true if all batch finished. */ - bool trainOneBatch(size_t batchSize = -1UL); + bool trainOneBatch(size_t batchSize); - bool prepareBatchData(size_t batchSize = -1UL); + void trainOneDataBatch(size_t batchSize, const Arguments& args); - void finishTrainOneBatch(); + void startTestPeriod(); + void testOneDataBatch(size_t batchSize, const Arguments& args); + void finishTestPeriod(); - void forwardOneBatch() throw(UnsupportError); + void forwardOneBatch(size_t batchSize); - Arguments* getNetworkOutput(); + Arguments* getForwardOutput(); Matrix* getLayerOutput(const std::string& layerName); }; -/// The N-Best results generated from one input sequence. +/// the N-Best results generated from one input sequence. class ISequenceResults { public: virtual ~ISequenceResults(); diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/api/PaddleAPIPrivate.h new file mode 100644 index 0000000000000..93cdca8c4beaa --- /dev/null +++ b/paddle/api/PaddleAPIPrivate.h @@ -0,0 +1,68 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "paddle/trainer/TrainerConfigHelper.h" + +#pragma once + +struct GradientMachinePrivate { + std::shared_ptr machine; + + template + inline T& cast(void* ptr) { + return *(T*)(ptr); + } +}; + +struct OptimizationConfigPrivate { + std::shared_ptr trainer_config; + paddle::OptimizationConfig config; + + const paddle::OptimizationConfig& getConfig() { + if (trainer_config != nullptr) { + return trainer_config->getOptConfig(); + } else { + return config; + } + } +}; + +struct TrainerConfigPrivate { + std::shared_ptr conf; + TrainerConfigPrivate() {} +}; + +struct ModelConfigPrivate { + std::shared_ptr conf; +}; + +struct ArgumentsPrivate { + std::vector outputs; + + inline paddle::Argument& getArg(size_t idx) throw(RangeError) { + if (idx < outputs.size()) { + return outputs[idx]; + } else { + RangeError e; + throw e; + } + } + + template + std::shared_ptr& cast(void* rawPtr) const { + return *(std::shared_ptr*)(rawPtr); + } +}; + diff --git a/paddle/api/ParameterOptimizer.cpp b/paddle/api/ParameterOptimizer.cpp index e087defc6043c..b13761ab0900d 100644 --- a/paddle/api/ParameterOptimizer.cpp +++ b/paddle/api/ParameterOptimizer.cpp @@ -14,6 +14,7 @@ limitations under the License. */ #include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" #include "paddle/parameter/ParameterOptimizer.h" #include "Internal.h" #include @@ -60,10 +61,9 @@ ParameterOptimizer::~ParameterOptimizer() { ParameterOptimizer* ParameterOptimizer::create(OptimizationConfig* config) { CHECK(config != nullptr); - auto opt_config_ptr = (paddle::OptimizationConfig*)config->getRawPtr(); auto retOptimizer = new ParameterOptimizer(); retOptimizer->m->optimizer.reset( - paddle::ParameterOptimizer::create(*opt_config_ptr, false)); + paddle::ParameterOptimizer::create(config->m->getConfig(), false)); return retOptimizer; } diff --git a/paddle/api/Trainer.cpp b/paddle/api/Trainer.cpp index 95b578c8db9fd..b61f36f740d47 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/api/Trainer.cpp @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PaddleAPI.h" +#include "PaddleAPIPrivate.h" #include #include @@ -30,31 +31,17 @@ P_DECLARE_string(config); P_DECLARE_string(init_model_path); P_DECLARE_int32(start_pass); -struct TrainPassContext { - int64_t batchId; - int32_t batchSize; - real avgTestCost; - int64_t numAvgTests; - int passInnerId; - paddle::DataBatch data; - std::vector forwardOutput; -}; - struct TrainerPrivate : public paddle::Trainer { - void startTrain(); - void finishTrain(); - - void startTrainPass(); - void finishTrainPass(); - - bool _trainOneBatch(); - - bool _prepareBatchData(); - void _forwardOneBatch() throw(UnsupportError); - + bool _trainOneBatch(size_t batchSize); + bool forwardOneBatch(size_t batchSize); + void forwardOneDataBatch(const std::vector& inArgs); + void setBatchSize(size_t batchSize); + std::vector& getForwardOutput(); + + void startTestPeriod(); + void finishTestPeriod(); + void testOneDataBatch(const paddle::DataBatch& dataBatch); TrainerPrivate() : paddle::Trainer() {} - - TrainPassContext trainPassContext; }; Trainer::Trainer() : m(new TrainerPrivate()) { @@ -75,61 +62,76 @@ Trainer* Trainer::createByCommandLine() throw(IOError) { } } -void Trainer::startTrain() { m->startTrain(); } +Trainer::Trainer(TrainerConfig* config, GradientMachine* gm) + : m(new TrainerPrivate()) { + m->init(config->m->conf, /* testing= */false, gm ? 
gm->m->machine : nullptr); +} -void TrainerPrivate::startTrain() { - srand(this->config_->getConfig().start_pass() + 1); - this->dataProvider_->reset(); - this->trainerInternal_.getGradientMachine()->start(*config_, dataProvider_); +Trainer* Trainer::create(TrainerConfig* config, GradientMachine* gm) + throw(IOError) +{ + auto retv = new Trainer(config, gm); + if (retv->m->getConfig().IsInitialized()) { + return retv; + } else { + retv->m->getConfig().CheckInitialized(); + throw IOError(); + } } -void Trainer::finishTrain() { m->finishTrain(); } +void Trainer::startTrain() { m->startTrain(); } -void TrainerPrivate::finishTrain() { - this->trainerInternal_.getGradientMachine()->finish(); -} +void Trainer::finishTrain() { m->finishTrain(); } void Trainer::startTrainPass() { m->startTrainPass(); } -void TrainerPrivate::startTrainPass() { - this->stats_.reset(); - this->trainPassContext.batchId = 0; - this->trainPassContext.batchSize = this->config_->getOptConfig().batch_size(); - this->trainPassContext.avgTestCost = 0; - this->trainPassContext.numAvgTests = 0; - this->trainPassContext.passInnerId = 0; - this->trainerInternal_.getParameterUpdater()->startPass(); - this->evaluator_->start(); -} - void Trainer::finishTrainPass() { m->finishTrainPass(); } -void TrainerPrivate::finishTrainPass() { - this->trainerInternal_.getGradientMachine()->onPassEnd(); - this->trainerInternal_.getParameterUpdater()->finishPass(); - evaluator_->finish(); +void Trainer::trainOneDataBatch(size_t batchSize, const Arguments& inArgs) { + paddle::DataBatch dataBatch; + dataBatch.getStreams() = inArgs.m->outputs; + dataBatch.setSize(batchSize); + m->trainOneDataBatch(dataBatch); } -void Trainer::setBatchSize(size_t batchSize) { - this->m->trainPassContext.batchSize = batchSize; +bool Trainer::trainOneBatch(size_t batchSize) { + return m->_trainOneBatch(batchSize); } -bool Trainer::trainOneBatch(size_t batchSize) { - if (batchSize == -1UL) { - this->setBatchSize(batchSize); +bool TrainerPrivate::_trainOneBatch(size_t batchSize) { + paddle::DataBatch dataBatch; + CHECK(dataProvider_) << "data_provider is not specified"; + int num = dataProvider_->getNextBatch(batchSize, &dataBatch); + if (num == 0) { + return false; } - return m->_trainOneBatch(); + trainOneDataBatch(dataBatch); + return false; } -bool TrainerPrivate::_trainOneBatch() { - if (this->_prepareBatchData()) { - return true; +void TrainerPrivate::startTestPeriod() { + if (!tester_) { + createTester(); } - this->trainerInternal_.trainOneBatch(this->trainPassContext.batchId, - this->trainPassContext.data); - return false; + tester_->startTestPeriod(); +} + +void Trainer::startTestPeriod() { m->startTestPeriod(); } + +void TrainerPrivate::testOneDataBatch(const paddle::DataBatch& dataBatch) { + tester_->testOneDataBatch(dataBatch, &forwardOutput_); +} + +void Trainer::testOneDataBatch(size_t batchSize, const Arguments& args) { + paddle::DataBatch dataBatch; + dataBatch.getStreams() = args.m->outputs; + dataBatch.setSize(batchSize); + m->testOneDataBatch(dataBatch); } +void TrainerPrivate::finishTestPeriod() { tester_->finishTestPeriod(); } +void Trainer::finishTestPeriod() { m->finishTestPeriod(); } + Matrix* Trainer::getLayerOutput(const std::string& layerName) { auto nn = std::dynamic_pointer_cast( this->m->getGradientMachine()); @@ -138,46 +140,37 @@ Matrix* Trainer::getLayerOutput(const std::string& layerName) { return Matrix::createByPaddleMatrixPtr(&m); } -bool Trainer::prepareBatchData(size_t batchSize) { - if (batchSize != -1UL) { - 
this->setBatchSize(batchSize); +void Trainer::forwardOneBatch(size_t batchSize) { m->forwardOneBatch(batchSize); } + +bool TrainerPrivate::forwardOneBatch(size_t batchSize) { + CHECK(dataProvider_) << "data_provider is not specified"; + paddle::DataBatch dataBatch; + int num = dataProvider_->getNextBatch(batchSize, &dataBatch); + if (num == 0) { + return false; } - return this->m->_prepareBatchData(); -} -bool TrainerPrivate::_prepareBatchData() { - int num = dataProvider_->getNextBatch(this->trainPassContext.batchSize, - &this->trainPassContext.data); - return num == 0; + forwardOneDataBatch(dataBatch.getStreams()); + return true; } -void Trainer::finishTrainOneBatch() { ++m->trainPassContext.batchId; } +void TrainerPrivate::forwardOneDataBatch( + const std::vector& inArgs) { -void Trainer::forwardOneBatch() throw(UnsupportError) { m->_forwardOneBatch(); } - -void TrainerPrivate::_forwardOneBatch() throw(UnsupportError) { - auto& dataBatch = this->trainPassContext.data; - - int64_t actualBatchSize = dataBatch.getSize(); - if (actualBatchSize == 0) { - return; - } - - const std::vector& inArgs = dataBatch.getStreams(); - std::vector& outArgs = this->trainPassContext.forwardOutput; - outArgs.clear(); - paddle::PassType passType = - this->trainerInternal_.getParameterUpdater()->startBatch(actualBatchSize); + std::vector& outArgs = forwardOutput_; if (config_->getOptConfig().use_sparse_remote_updater()) { - this->trainerInternal_.getGradientMachine()->prefetch(inArgs); - this->trainerInternal_.getParameterUpdater()->getParametersRemote(); + trainerInternal_.getGradientMachine()->prefetch(inArgs); + trainerInternal_.getParameterUpdater()->getParametersRemote(); } - this->trainerInternal_.getGradientMachine()->forward( - inArgs, &outArgs, passType); + trainerInternal_.getGradientMachine()->forward( + inArgs, &outArgs, paddle::PASS_TEST); +} + +Arguments* Trainer::getForwardOutput() { + return Arguments::createByPaddleArgumentVector(&m->getForwardOutput()); } -Arguments* Trainer::getNetworkOutput() { - return Arguments::createByPaddleArgumentVector( - &m->trainPassContext.forwardOutput); +std::vector& TrainerPrivate::getForwardOutput() { + return forwardOutput_; } diff --git a/paddle/api/test/run_tests.sh b/paddle/api/test/run_tests.sh index 1fc6fd5a8c185..a4814f98f89c2 100755 --- a/paddle/api/test/run_tests.sh +++ b/paddle/api/test/run_tests.sh @@ -30,7 +30,7 @@ source .test_env/bin/activate pip --timeout 600 install ../../dist/*.whl -test_list="testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py" +test_list="testArguments.py testGradientMachine.py testMatrix.py testVector.py testTrain.py testTrainer.py" export PYTHONPATH=$PWD/../../../python/ diff --git a/paddle/api/test/testTrain.py b/paddle/api/test/testTrain.py index 7f79c2701e9ed..7759118a3d9d1 100644 --- a/paddle/api/test/testTrain.py +++ b/paddle/api/test/testTrain.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from py_paddle import swig_paddle, DataProviderWrapperConverter +from py_paddle import swig_paddle import paddle.trainer.config_parser -from paddle.trainer.PyDataProviderWrapper import DenseSlot, IndexSlot import numpy import util diff --git a/paddle/api/test/testTrainer.py b/paddle/api/test/testTrainer.py new file mode 100644 index 0000000000000..da69a60f84f4d --- /dev/null +++ b/paddle/api/test/testTrainer.py @@ -0,0 +1,63 @@ +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer.config_parser import parse_config +from paddle.trainer.config_parser import logger +from py_paddle import swig_paddle +import util + +def main(): + trainer_config = parse_config( + "./testTrainConfig.py", "") + model = swig_paddle.GradientMachine.createFromConfigProto( + trainer_config.model_config) + trainer = swig_paddle.Trainer.create(trainer_config, model) + trainer.startTrain() + for train_pass in xrange(2): + trainer.startTrainPass() + num = 0 + cost = 0 + while True: # Train one batch + batch_size = 1000 + data, atEnd = util.loadMNISTTrainData(batch_size) + if atEnd: + break + trainer.trainOneDataBatch(batch_size, data) + outs = trainer.getForwardOutput() + cost += sum(outs[0]['value']) + num += batch_size + trainer.finishTrainPass() + logger.info('train cost=%f' % (cost / num)) + + trainer.startTestPeriod() + num = 0 + cost = 0 + while True: # Test one batch + batch_size = 1000 + data, atEnd = util.loadMNISTTrainData(batch_size) + if atEnd: + break + trainer.testOneDataBatch(batch_size, data) + outs = trainer.getForwardOutput() + cost += sum(outs[0]['value']) + num += batch_size + trainer.finishTestPeriod() + logger.info('test cost=%f' % (cost / num)) + + trainer.finishTrain() + + +if __name__ == '__main__': + swig_paddle.initPaddle("--use_gpu=0", "--trainer_count=1") + main() diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index 6d8f5da3e298f..dd2e146d112c0 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -63,7 +63,7 @@ def __init__(self, input_type, pos): def scan(self, dat): self.extend_cols(dat) - self.__rows__.append(len(dat) + self.__rows__[-1]) + self.__rows__.append(len(self.__cols__)) self.__height__ += 1 def extend_cols(self, dat): diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py index e6cf2710ef523..53f67a861e7d9 100644 --- a/paddle/py_paddle/util.py +++ b/paddle/py_paddle/util.py @@ -79,6 +79,20 @@ def wrap(callback): else: return __ParameterCallbackWrapper__(callback).__disown__() +def __arguments_to_numpy__(i, arg): + assert isinstance(arg, swig_paddle.Arguments) + value = arg.getSlotValue(i) + if value is not None: + assert isinstance(value, swig_paddle.Matrix) + value = value.copyToNumpyMat() + ids = arg.getSlotIds(i) + if ids is not None: + assert isinstance(ids, swig_paddle.IVector) + ids = ids.copyToNumpyArray() + return { + "value": value, + "id": ids + } def __monkeypatch_gradient_machine__(): """ @@ -88,20 +102,6 @@ def __monkeypatch_gradient_machine__(): swig_paddle.GradientMachine.loadFromConfigFile = \ staticmethod(loadGradientMachine) - def __arguments_to_numpy__(i, arg): - assert isinstance(arg, swig_paddle.Arguments) - value = arg.getSlotValue(i) - if value is not None: - assert isinstance(value, swig_paddle.Matrix) - value = value.copyToNumpyMat() - ids = arg.getSlotIds(i) - if ids is not None: - assert isinstance(ids, swig_paddle.IVector) - 
ids = ids.copyToNumpyArray() - return { - "value": value, - "id": ids - } def __matrix_to_numpy__(m): if isinstance(m, swig_paddle.Matrix): @@ -126,7 +126,7 @@ def createFromConfigProto(protoObj, :type paramTypes: list of int :return: paddle.GradientMachine """ - assert isinstance(protoObj, paddle.proto.ModelConfig_pb2.ModelConfig) + assert isinstance(protoObj, paddle.proto.ModelConfig) return swig_paddle.GradientMachine.createByConfigProtoStr( protoObj.SerializeToString(), createMode, paramTypes) @@ -460,13 +460,29 @@ def OptimizationConfig_createFromProto(protoObj): """ assert isinstance(protoObj, - paddle.proto.TrainerConfig_pb2.OptimizationConfig) + paddle.proto.OptimizationConfig) return swig_paddle.OptimizationConfig.createFromProtoString( protoObj.SerializeToString()) swig_paddle.OptimizationConfig.createFromProto = staticmethod( OptimizationConfig_createFromProto) + def TrainerConfig_createFromProto(protoObj): + """ + Create a new paddle.TrainerConfig from + proto.OptimizationConfig + + :param protoObj: proto.TrainerConfig + :return: paddle.TrainerConfig + """ + assert isinstance(protoObj, + paddle.proto.TrainerConfig) + return swig_paddle.TrainerConfig.createFromProtoString( + protoObj.SerializeToString()) + + swig_paddle.TrainerConfig.createFromProto = staticmethod( + TrainerConfig_createFromProto) + def __monkey_patch_parameter__(): def getBufs(self): @@ -483,9 +499,66 @@ def getBufs(self): swig_paddle.Parameter.getBufs = getBufs +def __monkey_patch_trainer__(): + swig_paddle.Trainer.__create__ = staticmethod(swig_paddle.Trainer.create) + + def Trainer_create(config, model=None): + """ + Create a trainer for model with TrainerCOnfig trainer_config + trainer_config.model_config will be ignored when model is supplied. + Trainer.trainOneBatch() and Trainer.forwardOneBatch() can be used only + when trainer_config.data_config is set. + + A typical usage for Trainer is: + .. code-block:: python + trainer = Trainer.create(trainer_config, model) + for p in xrange(num_passes) + while True: + data = get_next_batch(batch_size) + if not data: + break + trainer.trainOneDataBatch(batch_size, data) + trainer.finishTrainPass() + trainer.finishTrain() + + The trainer will take care of logging, model saving, distributed + training, etc. + + :param config: trainer configuration + :type config: paddle.proto.TrainerConfig + :param model: the model to be trained + :type model: swig_paddle.GradientMachine + :return: a trainer + :rtype swig_paddle.Trainer + + """ + assert isinstance(config, paddle.proto.TrainerConfig) + if model is not None: + assert isinstance(model, swig_paddle.GradientMachine) + return swig_paddle.Trainer.__create__( + swig_paddle.TrainerConfig.createFromProto(config), model) + swig_paddle.Trainer.create = staticmethod(Trainer_create) + + swig_paddle.Trainer.__getForwardOutput__ = \ + swig_paddle.Trainer.getForwardOutput + + def getForwardOutput(self): + """ + Get the netword outputs from the previous trainOneBatch(), + trainOneDataBatch(), testOneDataPatch(), or forwardOneBatch() call. + + :return: list of dictionary with keys ['id', 'value'], each value is a + numpy.ndarray. 
+ """ + outArgs = self.__getForwardOutput__() + return [__arguments_to_numpy__(i, outArgs) for i in xrange( + outArgs.getSlotNum())] + + swig_paddle.Trainer.getForwardOutput = getForwardOutput + def monkeypatches(): patches = [__monkeypatch_init_paddle__, __monkeypatch_gradient_machine__, __monkey_patch_protobuf_objects__, - __monkey_patch_parameter__] + __monkey_patch_parameter__, __monkey_patch_trainer__] for patch in patches: patch() diff --git a/paddle/trainer/Tester.cpp b/paddle/trainer/Tester.cpp index ccf06e1d84edc..b1bb75654a9d5 100644 --- a/paddle/trainer/Tester.cpp +++ b/paddle/trainer/Tester.cpp @@ -71,24 +71,36 @@ Tester::Tester(const std::shared_ptr &config, parameterUpdater_)); } +void Tester::startTestPeriod() { + testEvaluator_->start(); + testContext_.cost = 0; + testContext_.numSamples = 0; + + parameterUpdater_->apply(); + if (intconfig_->prevBatchState) { + gradientMachine_->getState(*intconfig_->trainState); + gradientMachine_->setState(*intconfig_->testState); + } +} + +void Tester::testOneDataBatch( + const DataBatch& dataBatch, std::vector* outArgs) { + testContext_.cost += forwardOneBatch( + dataBatch, testEvaluator_.get(), outArgs); + testContext_.numSamples += dataBatch.getSize(); +} + void Tester::testOnePeriod() { DataBatch dataBatch; int64_t batchSize = config_->getOptConfig().batch_size(); - testEvaluator_->start(); - real cost = 0; - int64_t numSamples = 0; bool testAllData = intconfig_->testPeriod == 0 || intconfig_->testAllDataInOnePeriod; - int batches = testAllData ? std::numeric_limits::max() : intconfig_->testPeriod; - parameterUpdater_->apply(); - if (intconfig_->prevBatchState) { - gradientMachine_->getState(*intconfig_->trainState); - gradientMachine_->setState(*intconfig_->testState); - } + std::vector outArgs; + startTestPeriod(); for (int i = 0; i < batches; ++i) { int num = testDataProvider_->getNextBatch(batchSize, &dataBatch); if (num == 0) { @@ -102,13 +114,17 @@ void Tester::testOnePeriod() { num = testDataProvider_->getNextBatch(batchSize, &dataBatch); } } - cost += testOneBatch(dataBatch, testEvaluator_.get()); - numSamples += num; + testOneDataBatch(dataBatch, &outArgs); } +} + +void Tester::finishTestPeriod() { testEvaluator_->finish(); - CHECK_GT(numSamples, 0) << "There is no samples in your test batch. Possibly " - "wrong implementation of DataProvidor.reset()"; - LOG(INFO) << " Test samples=" << numSamples << " cost=" << cost / numSamples + CHECK_GT(testContext_.numSamples, 0) + << "There is no samples in your test batch. 
Possibly " + "wrong implementation of DataProvidor.reset()"; + LOG(INFO) << " Test samples=" << testContext_.numSamples + << " cost=" << testContext_.cost / testContext_.numSamples << " Eval: " << *testEvaluator_; parameterUpdater_->restore(); if (intconfig_->prevBatchState) { @@ -128,9 +144,11 @@ int64_t Tester::testOneBatchById(int64_t batchId) { return 0; } + std::vector outArgs; + stats_ += std::pair{ actualBatchSize, - testOneBatch(dataBatch, testEvaluator_.get())}; + forwardOneBatch(dataBatch, testEvaluator_.get(), &outArgs)}; if (((batchId + 1) % intconfig_->logPeriod) == 0) { LOG(INFO) << " Batch=" << batchId + 1 << " " << stats_.getStats(false); @@ -139,7 +157,10 @@ int64_t Tester::testOneBatchById(int64_t batchId) { return actualBatchSize; } -real Tester::testOneBatch(const DataBatch &dataBatch, Evaluator *evaluator) { +real Tester::forwardOneBatch(const DataBatch &dataBatch, + Evaluator *evaluator, + std::vector* pOutArgs) { + auto& outArgs = *pOutArgs; const std::vector& inArgs = dataBatch.getStreams(); if (intconfig_->loadsaveParametersInPserver) { REGISTER_TIMER("prefetch"); @@ -148,12 +169,11 @@ real Tester::testOneBatch(const DataBatch &dataBatch, Evaluator *evaluator) { true /*after apply*/); } - std::vector outArgs; gradientMachine_->forward(inArgs, &outArgs, PASS_TEST); // write features if set this flag and outArgs is not empty std::string featFile = intconfig_->featFile; - if (!featFile.empty() && !outArgs.empty()) { + if (!featFile.empty() && outArgs.empty()) { size_t numOutputs = outArgs.size(); std::vector featMatrices; featMatrices.resize(numOutputs); diff --git a/paddle/trainer/Tester.h b/paddle/trainer/Tester.h index 9663b8def9145..671ffc5220eba 100644 --- a/paddle/trainer/Tester.h +++ b/paddle/trainer/Tester.h @@ -68,6 +68,10 @@ class Tester { * is training at same time. */ void testOnePeriod(); + void startTestPeriod(); + void finishTestPeriod(); + void testOneDataBatch(const DataBatch& dataBatch, + std::vector* outArgs); /** * Test for given data batch. @@ -75,7 +79,9 @@ class Tester { * @param evaluator Evaluator * @return cost */ - real testOneBatch(const DataBatch &dataBatch, Evaluator *evaluator); + real forwardOneBatch(const DataBatch& dataBatch, + Evaluator* evaluator, + std::vector* outArgs); /** @@ -99,6 +105,10 @@ class Tester { std::ofstream os_; std::vector cpuMat_; std::vector cpuVec_; + struct { + int64_t numSamples; + real cost; + } testContext_; private: /** diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index 275150e12d12b..04535849eb4aa 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -196,7 +196,8 @@ void Trainer::init(const std::shared_ptr &config, if (!dataProvider_ && config_->hasDataConfig()) { dataProvider_.reset(DataProvider::create(*config_, *config_, gpuData)); } - if (dataProvider_) { + if (!testDataProvider_) { + // No evaluator_ if there is testDataProvider but no dataProvider. 
evaluator_.reset(trainerInternal_.getGradientMachine()->makeEvaluator()); currentEvaluator_.reset( trainerInternal_.getGradientMachine()->makeEvaluator()); @@ -215,10 +216,7 @@ void Trainer::init(const std::shared_ptr &config, DataProvider::create(config_->getTestDataConfig(), *config_, gpuData)); } if (testDataProvider_) { - tester_.reset(new Tester(config_, createTesterConfig(), - trainerInternal_.getGradientMachine(), - trainerInternal_.getParameterUpdater(), - testDataProvider_)); + createTester(); } if (!testing && @@ -258,34 +256,25 @@ void Trainer::init(const std::shared_ptr &config, } } - // set current evaluator and evalutor trainerInternal_.setCurrentEvaluator(currentEvaluator_.get()); trainerInternal_.setEvaluator(evaluator_.get()); } void Trainer::train(size_t numPasses) { - srand(config_->getConfig().start_pass() + 1); - dataProvider_->reset(); - - if (this->testDataProvider_) { - this->testDataProvider_->reset(); - } - - trainerInternal_.getGradientMachine()->start(*config_, dataProvider_); - + startTrain(); for (size_t i = 0; i < numPasses; ++i) { if (IGradientMachineMode::trainWholeDataInOneBatch(mode_)) { trainOnePassBatch(config_->getConfig().start_pass() + i); } else { - trainOnePass(config_->getConfig().start_pass() + i); + trainOnePass(); } if (i < numPasses - 1) { dataProvider_->reset(); } } - trainerInternal_.getGradientMachine()->finish(); + finishTrain(); } @@ -387,13 +376,30 @@ real Trainer::checkGradient() { return maxDiff; } -void Trainer::trainOnePass(int passId) { - this->stats_->reset(); - int64_t batchId = 0; - int32_t batchSize = config_->getOptConfig().batch_size(); - real avgTestCost = 0; - int64_t numAvgTests = 0; - int passInnerId = 1; +void Trainer::startTrain() { + trainPassContext_.passId = config_->getConfig().start_pass(); + srand(config_->getConfig().start_pass() + 1); + if (dataProvider_) { + dataProvider_->reset(); + } + + if (this->testDataProvider_) { + this->testDataProvider_->reset(); + } + + trainerInternal_.getGradientMachine()->start(*config_, dataProvider_); +} + +void Trainer::finishTrain() { + trainerInternal_.getGradientMachine()->finish(); +} + +void Trainer::startTrainPass() { + stats_->reset(); + trainPassContext_.batchId = 0; + trainPassContext_.avgTestCost = 0; + trainPassContext_.numAvgTests = 0; + trainPassContext_.passInnerId = 1; trainerInternal_.getParameterUpdater()->startPass(); evaluator_->start(); @@ -401,81 +407,83 @@ void Trainer::trainOnePass(int passId) { trainerInternal_.getGradientMachine()->resetState(); trainerInternal_.getGradientMachine()->getState(testState_); } - while (true) { - DataBatch dataBatch; - - int num = 0; - { - REGISTER_TIMER("getTrainBatch"); - num = dataProvider_->getNextBatch(batchSize, &dataBatch); - } - if (num == 0) break; +} - if (averageEvaluator_) { - int64_t mod = batchId % FLAGS_average_test_period; - if (mod >= FLAGS_average_test_period - FLAGS_log_period) { - if (mod == FLAGS_average_test_period - FLAGS_log_period) { - averageEvaluator_->start(); - } - trainerInternal_.getParameterUpdater()->apply(); - if (FLAGS_prev_batch_state) { - trainerInternal_.getGradientMachine()->getState(trainState_); - } - avgTestCost += - tester_->testOneBatch(dataBatch, averageEvaluator_.get()); - if (FLAGS_prev_batch_state) { - trainerInternal_.getGradientMachine()->setState(trainState_); - } - numAvgTests += num; - trainerInternal_.getParameterUpdater()->restore(); +void Trainer::trainOneDataBatch(DataBatch& dataBatch) { + int num = dataBatch.getSize(); + if (averageEvaluator_) { + int64_t mod = 
trainPassContext_.batchId % FLAGS_average_test_period; + if (mod >= FLAGS_average_test_period - FLAGS_log_period) { + if (mod == FLAGS_average_test_period - FLAGS_log_period) { + averageEvaluator_->start(); } + trainerInternal_.getParameterUpdater()->apply(); + if (FLAGS_prev_batch_state) { + trainerInternal_.getGradientMachine()->getState(trainState_); + } + trainPassContext_.avgTestCost += + tester_->forwardOneBatch( + dataBatch, averageEvaluator_.get(), &forwardOutput_); + if (FLAGS_prev_batch_state) { + trainerInternal_.getGradientMachine()->setState(trainState_); + } + trainPassContext_.numAvgTests += num; + trainerInternal_.getParameterUpdater()->restore(); } - { - REGISTER_TIMER("TrainBatch"); - trainerInternal_.trainOneBatch(batchId, dataBatch); - } + } + { + REGISTER_TIMER("TrainBatch"); + trainerInternal_.trainOneBatch( + trainPassContext_.batchId, dataBatch, &forwardOutput_); + } - if (averageEvaluator_ && - batchId % FLAGS_average_test_period == FLAGS_average_test_period - 1) { - averageEvaluator_->finish(); - LOG(INFO) << " Averaged parameter:" - << " cost=" << avgTestCost / numAvgTests - << " Eval: " << *averageEvaluator_; - numAvgTests = 0; - avgTestCost = 0; - } + if (averageEvaluator_ && + trainPassContext_.batchId % FLAGS_average_test_period + == FLAGS_average_test_period - 1) { + averageEvaluator_->finish(); + LOG(INFO) << " Averaged parameter:" + << " cost=" << trainPassContext_.avgTestCost + / trainPassContext_.numAvgTests + << " Eval: " << *averageEvaluator_; + trainPassContext_.numAvgTests = 0; + trainPassContext_.avgTestCost = 0; + } - ++batchId; + ++trainPassContext_.batchId; - if (batchId % FLAGS_log_period == 0) { - FOR_TIMING(globalStat.setThreadInfo(true)); - FOR_TIMING(globalStat.printAllStatus()); - FOR_TIMING(globalStat.reset()); - } + if (trainPassContext_.batchId % FLAGS_log_period == 0) { + FOR_TIMING(globalStat.setThreadInfo(true)); + FOR_TIMING(globalStat.printAllStatus()); + FOR_TIMING(globalStat.reset()); + } - if (testDataProvider_ && FLAGS_test_period > 0 && - batchId % FLAGS_test_period == 0) { - tester_->testOnePeriod(); - } + if (testDataProvider_ && FLAGS_test_period > 0 && + trainPassContext_.batchId % FLAGS_test_period == 0) { + tester_->testOnePeriod(); + } - if (FLAGS_saving_period_by_batches > 0 && - batchId > FLAGS_saving_period_by_batches * passInnerId && - 0 == FLAGS_trainer_id) { - trainerInternal_.getParameterUpdater()->catchUpWith(); - if (testDataProvider_) { - tester_->testOnePeriod(); - } - paramUtil_->saveParametersOnePass(passId, passInnerId); - ++passInnerId; + if (FLAGS_saving_period_by_batches > 0 && + trainPassContext_.batchId + > FLAGS_saving_period_by_batches * trainPassContext_.passInnerId && + 0 == FLAGS_trainer_id) { + trainerInternal_.getParameterUpdater()->catchUpWith(); + if (testDataProvider_) { + tester_->testOnePeriod(); } + paramUtil_->saveParametersOnePass( + trainPassContext_.passId, trainPassContext_.passInnerId); + ++trainPassContext_.passInnerId; } +} - if (batchId == 0) { +void Trainer::finishTrainPass() { + if (trainPassContext_.batchId == 0) { // This means no more data from DataProvider return; } - trainerInternal_.finishTrainPass(passId, batchId); + trainerInternal_.finishTrainPass( + trainPassContext_.passId, trainPassContext_.batchId); FOR_TIMING(globalStat.setThreadInfo(true)); FOR_TIMING(globalStat.printAllStatus()); @@ -485,9 +493,30 @@ void Trainer::trainOnePass(int passId) { tester_->testOnePeriod(); } - if (passId % FLAGS_saving_period == 0 && FLAGS_trainer_id == 0) { - 
paramUtil_->saveParametersOnePass(passId); + if (trainPassContext_.passId % FLAGS_saving_period == 0 + && FLAGS_trainer_id == 0) { + paramUtil_->saveParametersOnePass(trainPassContext_.passId); } + ++trainPassContext_.passId; +} + +void Trainer::trainOnePass() { + startTrainPass(); + size_t batchSize = config_->getOptConfig().batch_size(); + while (true) { + DataBatch dataBatch; + + int num = 0; + { + REGISTER_TIMER("getTrainBatch"); + num = dataProvider_->getNextBatch(batchSize, &dataBatch); + } + if (num == 0) break; + CHECK_EQ(num, dataBatch.getSize()); + trainOneDataBatch(dataBatch); + } + + finishTrainPass(); } void Trainer::trainOnePassBatch(int passId) { @@ -582,6 +611,13 @@ void Trainer::clearGradient() { int Trainer::getBatchSize() { return config_->getOptConfig().batch_size(); } +void Trainer::createTester() { + tester_.reset(new paddle::Tester(config_, createTesterConfig(), + trainerInternal_.getGradientMachine(), + trainerInternal_.getParameterUpdater(), + testDataProvider_)); +} + void Trainer::test() { tester_->test(); } diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h index 9bfd6d107a204..4f4811a139e74 100644 --- a/paddle/trainer/Trainer.h +++ b/paddle/trainer/Trainer.h @@ -94,6 +94,11 @@ class Trainer { */ real checkGradient(); + void startTrain(); + void finishTrain(); + void startTrainPass(); + void finishTrainPass(); + void trainOneDataBatch(DataBatch& dataBatch); /** * given a dataBatch and the current parameter value @@ -144,11 +149,11 @@ class Trainer { protected: /** - * Train one pass of data. passId starts from 0 + * Train one pass of data. * * SGD Method. */ - void trainOnePass(int passId); + void trainOnePass(); /** * Train one pass in one batch. @@ -161,6 +166,8 @@ class Trainer { */ void clearGradient(); + void createTester(); + private: std::unique_ptr createTesterConfig(); @@ -173,6 +180,17 @@ class Trainer { MachineState trainState_; MachineState testState_; + struct TrainPassContext { + int64_t batchId; + real avgTestCost; + int64_t numAvgTests; + int passId; + int passInnerId; + }; + std::vector forwardOutput_; + + TrainPassContext trainPassContext_; + std::unique_ptr evaluator_; std::unique_ptr currentEvaluator_; std::unique_ptr averageEvaluator_; diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/trainer/TrainerInternal.cpp index 6029a4b2c1d0a..e23e42927c381 100644 --- a/paddle/trainer/TrainerInternal.cpp +++ b/paddle/trainer/TrainerInternal.cpp @@ -55,6 +55,8 @@ void TrainerInternal::init(const std::shared_ptr &config, gradientMachine_ = gradientMachine; if (!gradientMachine) { + CHECK(config_->getConfig().has_model_config()) + << "Missing model_config in trainer_config"; gradientMachine_.reset(GradientMachine::create( config_->getConfig().model_config(), intconfig_->mode, parameterUpdater_->getParameterTypes())); @@ -62,7 +64,8 @@ void TrainerInternal::init(const std::shared_ptr &config, } void TrainerInternal::trainOneBatch(int64_t batchId, - const DataBatch& dataBatch) { + const DataBatch& dataBatch, + std::vector* outArgs) { // true means updating parameter whenever gradient is ready during backward() bool doPipelineUpdate = (intconfig_->mode != GradientMachine::kSgdSparseCpuTraining) && @@ -84,7 +87,6 @@ void TrainerInternal::trainOneBatch(int64_t batchId, } const std::vector& inArgs = dataBatch.getStreams(); - std::vector outArgs; PassType passType = parameterUpdater_->startBatch(actualBatchSize); @@ -114,7 +116,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId, timer.start(); #endif REGISTER_TIMER("forwardBackward"); 
- forwardBackwardBatch(inArgs, outArgs, passType, updateCallback, + forwardBackwardBatch(inArgs, *outArgs, passType, updateCallback, doPipelineUpdate); #ifndef PADDLE_DISABLE_TIMER timer.stop(); @@ -132,7 +134,7 @@ void TrainerInternal::trainOneBatch(int64_t batchId, real cost = 0; { REGISTER_TIMER("sumCost"); - cost = Argument::sumCosts(outArgs); + cost = Argument::sumCosts(*outArgs); } if (batchId % intconfig_->log_period == 0) { diff --git a/paddle/trainer/TrainerInternal.h b/paddle/trainer/TrainerInternal.h index 17011c4d2e46f..3a53aa1d17b31 100644 --- a/paddle/trainer/TrainerInternal.h +++ b/paddle/trainer/TrainerInternal.h @@ -81,7 +81,9 @@ class TrainerInternal { * @param batchId current batch id * @param dataBatch data for the batch */ - void trainOneBatch(int64_t batchId, const DataBatch& dataBatch); + void trainOneBatch(int64_t batchId, + const DataBatch& dataBatch, + std::vector* outArgs); /** * showParameterStats diff --git a/proto/TrainerConfig.proto.m4 b/proto/TrainerConfig.proto.m4 index a42ff88d54b5e..3b0e24f90bed8 100644 --- a/proto/TrainerConfig.proto.m4 +++ b/proto/TrainerConfig.proto.m4 @@ -130,7 +130,7 @@ message OptimizationConfig { }; message TrainerConfig { - required ModelConfig model_config = 1; + optional ModelConfig model_config = 1; optional DataConfig data_config = 2; required OptimizationConfig opt_config = 3; optional DataConfig test_data_config = 4; diff --git a/python/paddle/proto/__init__.py b/python/paddle/proto/__init__.py index 7f9e87eee6037..cd6a59ecbb095 100644 --- a/python/paddle/proto/__init__.py +++ b/python/paddle/proto/__init__.py @@ -12,3 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. +from paddle.proto.TrainerConfig_pb2 import OptimizationConfig, TrainerConfig +from paddle.proto.ModelConfig_pb2 import ModelConfig From 45dca19a8d8744127331af0e0fd326e4afc740a2 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 28 Oct 2016 11:07:31 +0800 Subject: [PATCH 049/180] Change contribute to paddle to fit new branching model (#275) * Change contribute to paddle to fit new branching model --- doc/build/contribute_to_paddle.md | 41 +++++++++++++++++++++++++------ 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/doc/build/contribute_to_paddle.md b/doc/build/contribute_to_paddle.md index bbdbb4d4227d0..a9ab69c5f42b8 100644 --- a/doc/build/contribute_to_paddle.md +++ b/doc/build/contribute_to_paddle.md @@ -4,7 +4,7 @@ We sincerely appreciate your contributions. You can use fork and pull request workflow to merge your code. ## Code Requirements -- Your code mush be fully documented by +- Your code must be fully documented by [doxygen](http://www.stack.nl/~dimitri/doxygen/) style. - Make sure the compiler option WITH\_STYLE\_CHECK is on and the compiler passes the code style check. @@ -20,16 +20,30 @@ It's just that simple. ## Clone +Paddle is currently using [git-flow branching model](http://nvie.com/posts/a-successful-git-branching-model/). +The **develop** is the main branch, and other user's branches are feature branches. + Once you've created a fork, you can use your favorite git client to clone your repo or just head straight to the command line: ```shell # Clone your fork to your local machine -git clone https://github.com/USERNAME/Paddle.git +git clone --branch develop https://github.com/USERNAME/Paddle.git +``` +If your repository doesn't contain **develop** branch, just create it by your own. 
+ +```shell +git clone https://github.com/USERNAME/Paddle.git Paddle +cd Paddle +git checkout -b develop # create develop branch. +git remote add upstream https://github.com/baidu/Paddle.git # add upstream to baidu/Paddle +git pull upstream develop # update to upstream ``` + Then you can start to develop by making a local developement branch + ```shell -git checkout -b MY_COOL_STUFF_BRANCH origin/master +git checkout -b MY_COOL_STUFF_BRANCH ``` ## Commit @@ -41,7 +55,7 @@ Commit your changes by following command lines: git status # add modified files git add xx -git commit -m "commit info" +env EDITOR=vim git commit # You can write your comments by vim/nano/emacs. ``` The first line of commit infomation is the title. The second and later lines are the details if any. @@ -63,7 +77,7 @@ git remote -v Update your fork with the latest upstream changes: ```shell -git pull --rebase upstream HEAD +git pull --rebase upstream develop ``` If there are no unique commits locally, git will simply perform a fast-forward. @@ -76,7 +90,7 @@ Now, your local master branch is up-to-date with everything modified upstream. ```shell # push to your repository in Github -git push origin HEAD +git push -u origin MY_COOL_STUFF_BRANCH # create remote branch MY_COOL_STUFF_BRANCH to origin. ``` ## Pull Request @@ -93,13 +107,24 @@ of conflict, you need to do the update manually. You need to do the following on your local repository: ```shell git checkout MY_COOL_STUFF_BRANCH -git pull --rebase upstream HEAD +git pull upstream develop # You may need to resolve the conflict according to the git prompt. # Make and test your code. -git push -f origin HEAD +git push origin MY_COOL_STUFF_BRANCH ``` Now your Pull Request is updated with the latest version. ## Revise your pull request When you revise your pull request according to reviewer's comments, please use 'git commit' instead of 'git commit --amend' to commit your changes so that the reviewers can see the difference between the new pull requrest and the old pull request. + +The possible commands are + +```shell +git checkout MY_COOL_STUFF_BRANCH +git pull upstream develop # update local to newest code base. +# May be some conflicts will occured. +# And develop your cool stuff +env EDITOR=vim git commit # add your revise log +git push origin MY_COOL_STUFF_BRANCH +``` From 0e1a22d0fcc80ea3931b7006c891b844b07f29a2 Mon Sep 17 00:00:00 2001 From: backyes Date: Fri, 28 Oct 2016 12:27:40 +0800 Subject: [PATCH 050/180] set test_period default value to 0 (#279) --- paddle/trainer/Trainer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/trainer/Trainer.cpp b/paddle/trainer/Trainer.cpp index 04535849eb4aa..7fc48dd1fbec6 100644 --- a/paddle/trainer/Trainer.cpp +++ b/paddle/trainer/Trainer.cpp @@ -40,7 +40,7 @@ limitations under the License. */ #include "TrainerConfigHelper.h" P_DEFINE_string(config, "", "Trainer config file"); -P_DEFINE_int32(test_period, 1000, +P_DEFINE_int32(test_period, 0, "Run test every so many train batches." " 0 for testing after each pass." " If not 0, test log_period batches." 
From ca5a5ec480db806674043311b2c59fd318ad4f41 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Fri, 28 Oct 2016 12:51:35 +0800 Subject: [PATCH 051/180] Make Paddle --save_dir support a directory name (#277) * Also fix #243 --- paddle/trainer/ParamUtil.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/paddle/trainer/ParamUtil.cpp b/paddle/trainer/ParamUtil.cpp index dae8b44b6db8e..bb309a54975a1 100644 --- a/paddle/trainer/ParamUtil.cpp +++ b/paddle/trainer/ParamUtil.cpp @@ -89,6 +89,9 @@ void ParameterUtil::saveParameters(int passId, int passInnerId) { } std::string basePath = config_->getSaveDir(); + if (basePath.find('/') == std::string::npos) { + basePath = "./" + basePath; + } mkDirRecursively(basePath.c_str()); std::string saveDir = path::join(basePath, buf); From fc9ca53ae421ec2eac5648ec8de6a1e7a993bfd0 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Fri, 28 Oct 2016 12:54:13 +0800 Subject: [PATCH 052/180] fix interface bug of block_expand_layer and add unittest (#265) * fix interface bug of block_expand_layer and add unittest * auto compute num_channels * default value of num_channels is None * adjust input order of block_expand --- .../paddle/trainer_config_helpers/layers.py | 34 ++++++++++--------- .../tests/configs/check.md5 | 2 +- .../tests/configs/test_maxout.py | 21 +++++++++++- 3 files changed, 39 insertions(+), 18 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index f8c32dc91f10b..7df9108ae82a4 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -54,7 +54,7 @@ 'cross_entropy_with_selfnorm', 'cross_entropy', 'multi_binary_label_cross_entropy', 'rank_cost', 'lambda_cost', 'huber_cost', - # 'block_expand_layer', # TODO(yuyang18): this layer is not correct + 'block_expand_layer', 'maxout_layer', 'out_prod_layer', 'print_layer' ] @@ -3284,18 +3284,18 @@ def linear_comb_layer(weights, vectors, size=None, name=None, @wrap_name_default() @layer_support() def block_expand_layer(input, - channel=0, block_x=0, block_y=0, stride_x=0, stride_y=0, padding_x=0, padding_y=0, + num_channels=None, name=None, layer_attr=None): """ Expand feature map to minibatch matrix. - - matrix width is: block_y * block_x * channel + - matrix width is: block_y * block_x * num_channels - matirx height is: outputH * outputW .. math:: @@ -3307,7 +3307,7 @@ def block_expand_layer(input, The expand method is the same with ExpandConvLayer, but saved the transposed value. After expanding, output.sequenceStartPositions will store timeline. The number of time steps are outputH * outputW and the dimension of each - time step is block_y * block_x * channel. This layer can be used after + time step is block_y * block_x * num_channels. This layer can be used after convolution neural network, and before recurrent neural network. The simple usage is: @@ -3315,7 +3315,7 @@ def block_expand_layer(input, .. code-block:: python block_expand = block_expand_layer(input, - channel=128, + num_channels=128, stride_x=1, stride_y=1, block_x=1, @@ -3323,8 +3323,8 @@ def block_expand_layer(input, :param input: The input layer. :type input: LayerOutput - :param channel: The channel number of input layer. - :type channel: int + :param num_channels: The channel number of input layer. + :type num_channels: int|None :param block_x: The width of sub block. :type block_x: int :param block_y: The width of sub block. @@ -3344,16 +3344,18 @@ def block_expand_layer(input, :return: LayerOutput object. 
:rtype: LayerOutput """ + if num_channels is None: + assert input.num_filters is not None + num_channels = input.num_filters Layer(name=name, - input=Input(input.name, - block_expand=BlockExpand(channels=channel, - block_x=block_x, - block_y=block_y, - stride_x=stride_x, - stride_y=stride_y, - padding_x=padding_x, - padding_y=padding_y) - ), + inputs=Input(input.name, + block_expand=BlockExpand(channels=num_channels, + block_x=block_x, + block_y=block_y, + stride_x=stride_x, + stride_y=stride_y, + padding_x=padding_x, + padding_y=padding_y)), type=LayerType.BLOCK_EXPAND, **ExtraLayerAttribute.to_kwargs(layer_attr) ) diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 88ce5c129e552..d1b22b34903df 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -12,7 +12,7 @@ a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 8bb44e1e5072d0c261572307e7672bda test_grumemory_layer.protostr 1f3510672dce7a9ed25317fc58579ac7 test_hsigmoid.protostr d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr -6fa59551808ee7012bbd24f757e782d2 test_maxout.protostr +5433ed33d4e7414eaf658f2a55946186 test_maxout.protostr 251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr 2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py index 079e2cf4c4320..7c1fb04766ae5 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.py @@ -25,6 +25,25 @@ stride=2, pool_type=MaxPooling()) -fc = fc_layer(input=pool, size=384, bias_attr=False) +conv2 = img_conv_layer(input=pool, + filter_size = 3, + num_channels=32, + num_filters=128, + padding=1, + act=LinearActivation(), + bias_attr=True) + +maxout2 = maxout_layer(input=conv, + num_channels=128, + groups=4) + +block = block_expand_layer(input=maxout, + num_channels=32, + stride_x=1, + stride_y=1, + block_x=1, + block_y=6) + +fc = fc_layer(input=block, size=384, bias_attr=False) outputs(fc) From fa24cbdbe1b18649a9140152b971473ba99079f7 Mon Sep 17 00:00:00 2001 From: backyes Date: Fri, 28 Oct 2016 13:10:04 +0800 Subject: [PATCH 053/180] Support empty Param Block in ParameterSever (#244) * Because in cluster maybe use a lot machine to train a model, and some parameter size could be too small for ParameterServer. Then some of pservers could not have any ParamBlock. * Also, because ports_num or ports_num_for_sparse is too large, then give a warning in runtime. 
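
For illustration only, the defensive pattern this patch introduces can be reduced to a few
lines of standalone C++. The names below (Block, setParameter) are invented for the sketch
and are not the actual ParameterServer2 types; the real change is in the diff that follows.

    #include <iostream>
    #include <vector>

    // Illustrative stand-in for the blocks carried by a parameter request.
    struct Block { int id; };

    // Sketch: a shard that received no parameter blocks warns and returns early
    // instead of proceeding with an empty work list.
    void setParameter(const std::vector<Block>& blocks) {
      if (blocks.empty()) {
        std::cerr << "warning: this shard stores no parameters; "
                     "ports_num / ports_num_for_sparse may be too large "
                     "for the total parameter size\n";
        return;
      }
      for (const auto& block : blocks) {
        // ... allocate buffers and copy the block as usual ...
        (void)block;
      }
    }

    int main() {
      setParameter({});          // triggers the warning path
      setParameter({{0}, {1}});  // normal path
    }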
--- paddle/pserver/ParameterServer2.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/pserver/ParameterServer2.cpp index 8f72c1988d167..c8f37d0bf4f84 100644 --- a/paddle/pserver/ParameterServer2.cpp +++ b/paddle/pserver/ParameterServer2.cpp @@ -264,6 +264,15 @@ void ParameterServer2::setParameter(const SendParameterRequest& request, std::vector blockIds; blockIds.reserve(request.blocks_size()); int bufferIndex = 0; + + if (!request.blocks().size()) { + LOG(WARNING) + << "--ports_num or --ports_num_for_sparse might be too large, " + << "or total dense parameter size or sparse parameters size " + << "might be too small, this psever doesn't store any parameter."; + return; + } + for (const auto& block : request.blocks()) { /// block size for parameter(e.g. 128 for sparse row, 1K for dense) uint64_t blockSize = getParameterConfig(block).parameter_block_size(); From ddfff3a7fd0e746f8ff232945ed0a05a89b9da1c Mon Sep 17 00:00:00 2001 From: liaogang Date: Sun, 30 Oct 2016 01:04:06 +0800 Subject: [PATCH 054/180] Add bilinear interpolation layer --- doc/ui/api/trainer_config_helpers/layers.rst | 6 + paddle/cuda/include/hl_cnn.h | 56 +++++++ paddle/cuda/include/stub/hl_cnn_stub.h | 24 +++ paddle/cuda/src/hl_cuda_cnn.cu | 134 ++++++++++++++- paddle/gserver/layers/BilinearInterpLayer.cpp | 87 ++++++++++ paddle/gserver/layers/BilinearInterpLayer.h | 45 +++++ paddle/gserver/tests/test_LayerGrad.cpp | 20 +++ paddle/math/Matrix.cpp | 154 ++++++++++++++++++ paddle/math/Matrix.h | 44 +++++ paddle/math/tests/test_matrixCompare.cpp | 66 ++++++++ proto/ModelConfig.proto.m4 | 10 ++ python/paddle/trainer/config_parser.py | 35 ++++ .../paddle/trainer_config_helpers/layers.py | 69 +++++++- .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/test_bilinear_interp.py | 33 ++++ 15 files changed, 781 insertions(+), 4 deletions(-) create mode 100644 paddle/gserver/layers/BilinearInterpLayer.cpp create mode 100644 paddle/gserver/layers/BilinearInterpLayer.h create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index c1d7a7ce81530..01443466105b5 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -263,6 +263,12 @@ interpolation_layer :members: interpolation_layer :noindex: +bilinear_interp_layer +------------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: bilinear_interp_layer + :noindex: + power_layer ----------- .. automodule:: paddle.trainer_config_helpers.layers diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index 5d750333e1e35..aa4720f6ca749 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -240,4 +240,60 @@ extern void hl_CMRNorm_backward( size_t channels, size_t height, size_t width, size_t sizeX, real alpha, real beta); +/** + * @brief Bilinear interpolation forward. + * + * @param[in] inData input value. + * @param[in] inImgH input image height. + * @param[in] inImgW input image width. + * @param[in] inputH input batchSize. + * @param[in] inputW input image data dim. + * @param[out] outData output value. + * @param[in] outImgH output image height. + * @param[in] outImgW output image width. + * @param[in] outputH output batchSize. + * @param[in] outputW output image data dim. + * @param[in] numChannels number of channels. 
+ * + */ +extern void hl_bilinear_forward(const real* inData, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + real* outData, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels); + + /** + * @brief Bilinear interpolation backward. + * + * @param[out] inGrad input gradient. + * @param[in] inImgH input image height. + * @param[in] inImgW input image width. + * @param[in] inputH input batchSize. + * @param[in] inputW input image data dim. + * @param[in] outGrad output gradient. + * @param[in] outImgH output image height. + * @param[in] outImgW output image width. + * @param[in] outputH output batchSize. + * @param[in] outputW output image data dim. + * @param[in] numChannels number of channels. + * + */ +extern void hl_bilinear_backward(real* inGrad, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + const real* outGrad, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels); + #endif /* HL_CNN_H_ */ diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index 38e359c3eb2f3..aa9442fb80237 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -89,4 +89,28 @@ inline void hl_CMRNorm_backward( size_t channels, size_t height, size_t width, size_t sizeX, real alpha, real beta) {} +inline void hl_bilinear_forward(const real* inData, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + real* outData, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels) {} + +inline void hl_bilinear_backward(real* inGrad, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + const real* outGrad, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels) {} + #endif // HL_CNN_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index abac83a3e0447..f965adc13575c 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -522,7 +522,7 @@ void hl_CMRNorm_backward(size_t frameCnt, const real* inV, size_t height, size_t width, size_t sizeX, real alpha, real beta) { size_t threadsNum = frameCnt * height * width; - size_t blocksX = (threadsNum + 1024 -1) / 1024; + size_t blocksX = (threadsNum + 1024 - 1) / 1024; size_t blocksY = 1; dim3 threads(1024, 1); dim3 grid(blocksX, blocksY); @@ -531,3 +531,135 @@ void hl_CMRNorm_backward(size_t frameCnt, const real* inV, height, width, sizeX, alpha, beta, inDiff); CHECK_SYNC("hl_CMRNorm_backward"); } + +__global__ void KeBilinearInterpFw(const size_t nthreads, + const real* in, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + real* out, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels, + const real ratioH, + const real ratioW) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + if(tid < nthreads) { + int outIdH = tid / (outputW / numChannels); + int outIdW = tid % (outputW / numChannels); + + int inIdH = ratioH * (outIdW / outImgW); + int hId = (inIdH < inImgH - 1) ? 
1 : 0; + real hlambda = ratioH * (outIdW / outImgW) - inIdH; + + int inIdW = ratioW * (tid % outImgW); + int wId = (inIdW < inImgW - 1) ? 1 : 0; + real wlambda = ratioW * (tid % outImgW) - inIdW; + + const real* inPos = &in[outIdH * inputW + inIdH * inImgW + inIdW]; + real* outPos = &out[outIdH * outputW + outIdW]; + for (int c = 0; c < numChannels; ++c) { + // bilinear interpolation + outPos[0] = (1.f - hlambda) * + ((1.f - wlambda) * inPos[0] + wlambda * inPos[wId]) + + hlambda * ((1.f - wlambda) * inPos[hId * inImgW] + + wlambda * inPos[hId * inImgW + wId]); + inPos += inImgH * inImgW; + outPos += outImgH * outImgW; + } + } +} + +void hl_bilinear_forward(const real* inData, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + real* outData, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels) { + int threadNum = outputH * outImgH * outImgW; + int blocks = (threadNum + 1024 - 1) / 1024; + + real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + real ratioW = (outImgW > 1) ? + static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + + KeBilinearInterpFw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( + threadNum, inData, inImgH, inImgW, inputH, inputW, outData, + outImgH, outImgW, outputH, outputW, numChannels, ratioH, ratioW); + CHECK_SYNC("hl_bilinear_forward failed"); +} + +__global__ void KeBilinearInterpBw(const size_t nthreads, + real* in, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + const real* out, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels, + const real ratioH, + const real ratioW) { + int tid = blockIdx.x * blockDim.x + threadIdx.x; + + if(tid < nthreads) { + int outIdH = tid / (outputW / numChannels); + int outIdW = tid % (outputW / numChannels); + + int inIdH = ratioH * (outIdW / outImgW); + int hId = (inIdH < inImgH - 1) ? 1 : 0; + real hlambda = ratioH * (outIdW / outImgW) - inIdH; + + int inIdW = ratioW * (tid % outImgW); + int wId = (inIdW < inImgW - 1) ? 1 : 0; + real wlambda = ratioW * (tid % outImgW) - inIdW; + + const real* outPos = &out[outIdH * outputW + outIdW]; + real* inPos = &in[outIdH * inputW + inIdH * inImgW + inIdW]; + for (int c = 0; c < numChannels; ++c) { + atomicAdd(&inPos[0], (1.f - hlambda) * (1.f - wlambda) * outPos[0]); + atomicAdd(&inPos[wId], (1.f - hlambda) * wlambda * outPos[0]); + atomicAdd(&inPos[hId * inImgW], hlambda * (1.f - wlambda) * outPos[0]); + atomicAdd(&inPos[hId * inImgW + wId], hlambda * wlambda * outPos[0]); + inPos += inImgH * inImgW; + outPos += outImgH * outImgW; + } + } +} + +void hl_bilinear_backward(real* inGrad, + const size_t inImgH, + const size_t inImgW, + const size_t inputH, + const size_t inputW, + const real* outGrad, + const size_t outImgH, + const size_t outImgW, + const size_t outputH, + const size_t outputW, + const size_t numChannels) { + int threadNum = outputH * outImgH * outImgW; + int blocks = (threadNum + 1024 - 1) / 1024; + + real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + real ratioW = (outImgW > 1) ? 
+ static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + + KeBilinearInterpBw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( + threadNum, inGrad, inImgH, inImgW, inputH, inputW, outGrad, + outImgH, outImgW, outputH, outputW, numChannels, ratioH, ratioW); + CHECK_SYNC("hl_bilinear_backward failed"); +} \ No newline at end of file diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/gserver/layers/BilinearInterpLayer.cpp new file mode 100644 index 0000000000000..f43086e585535 --- /dev/null +++ b/paddle/gserver/layers/BilinearInterpLayer.cpp @@ -0,0 +1,87 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "BilinearInterpLayer.h" +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" + +namespace paddle { + +REGISTER_LAYER(bilinear_interp, BilinearInterpLayer); + +size_t BilinearInterpLayer::getDataDimSize() { + getOutput().setFrameHeight(outImgH_); + getOutput().setFrameWidth(outImgW_); + return outImgH_ * outImgW_ * numChannels_; +} + +bool BilinearInterpLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + CHECK_EQ(1, config_.inputs_size()); + + const BilinearInterpConfig& conf = config_.inputs(0).bilinear_interp_conf(); + inImgH_ = inputLayers_[0]->getOutput().getFrameHeight(); + inImgW_ = inputLayers_[0]->getOutput().getFrameWidth(); + if (inImgH_ == 0) { + inImgH_ = conf.img_size_y(); + } + if (inImgW_ == 0) { + inImgW_ = conf.img_size_x(); + } + outImgH_ = conf.out_size_y(); + outImgW_ = conf.out_size_x(); + numChannels_ = conf.num_channels(); + + CHECK(outImgH_ > 0 && outImgW_ > 0); + CHECK(inImgH_ > 0 && inImgW_ > 0); + CHECK(numChannels_); + + return true; +} + +void BilinearInterpLayer::forward(PassType passType) { + Layer::forward(passType); + size_t batchSize = getInput(0).getBatchSize(); + size_t size = getDataDimSize(); + { + REGISTER_TIMER_INFO("FwResetTimer", getName().c_str()); + resetOutput(batchSize, size); + } + + MatrixPtr inV = getInputValue(0); + MatrixPtr outV = getOutputValue(); + { + REGISTER_TIMER_INFO("FwBilinearInterpTimer", getName().c_str()); + outV->bilinearForward(*inV, inImgH_, inImgW_, outImgH_, outImgW_, + numChannels_); + } +} + +void BilinearInterpLayer::backward(const UpdateCallback& callback) { + (void) callback; + + MatrixPtr inputG = getInputGrad(0); + MatrixPtr outG = getOutputGrad(); + { + REGISTER_TIMER_INFO("BwBilinearInterpTimer", getName().c_str()); + if (inputG) { + inputG->bilinearBackward(*outG, outImgH_, outImgW_, inImgH_, inImgW_, + numChannels_); + } + } +} +} // namespace paddle diff --git a/paddle/gserver/layers/BilinearInterpLayer.h b/paddle/gserver/layers/BilinearInterpLayer.h new file mode 100644 index 0000000000000..24f5b99910405 --- /dev/null +++ b/paddle/gserver/layers/BilinearInterpLayer.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Layer.h" +#include "paddle/math/Matrix.h" + +namespace paddle { + +/** + * @brief A layer for bilinear interpolation which is + * used on conv layer output. + * + * @note The config file api is bilinear_interp_layer. + */ +class BilinearInterpLayer : public Layer { +protected: + size_t outImgH_, outImgW_; + size_t inImgH_, inImgW_; + size_t numChannels_; + +public: + explicit BilinearInterpLayer(const LayerConfig& config) : Layer(config) {} + + virtual ~BilinearInterpLayer() {} + + size_t getDataDimSize(); + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + void forward(PassType passType); + void backward(const UpdateCallback& callback = nullptr); +}; + +} // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index c5723f8574ab3..425d669206cce 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -31,6 +31,26 @@ P_DECLARE_double(checkgrad_eps); P_DECLARE_bool(thread_local_rand_use_global_seed); P_DECLARE_bool(prev_batch_state); +TEST(Layer, BilinearInterpLayer) { + TestConfig config; + config.layerConfig.set_type("bilinear_interp"); + config.biasSize = 0; + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0}); + LayerInputConfig* input = config.layerConfig.add_inputs(); + BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf(); + + bilinear->set_img_size_x(32); + bilinear->set_img_size_y(32); + bilinear->set_out_size_x(64); + bilinear->set_out_size_y(64); + bilinear->set_num_channels(4); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + } +} + TEST(Operator, dot_mul) { TestConfig config; config.layerConfig.set_size(10); diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index a6ff2f3b35d04..469255719701a 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -23,6 +23,7 @@ limitations under the License. 
*/ #include "paddle/utils/Logging.h" #include +#include "hl_cnn.h" #include "hl_gpu.h" #include "hl_table_apply.h" #include "hl_top_k.h" @@ -1144,6 +1145,56 @@ void GpuMatrix::addColumnVector(const Matrix& b) { BaseMatrix::addColVector(const_cast(b)); } +void GpuMatrix::bilinearForward(const Matrix& in, + const size_t inImgH, + const size_t inImgW, + const size_t outImgH, + const size_t outImgW, + const size_t numChannels) { + CHECK(dynamic_cast(&in)); + + const size_t outputW = getWidth(); + const size_t outputH = getHeight(); + const size_t inputW = in.getWidth(); + const size_t inputH = in.getHeight(); + + real* outData = getData(); + const real* inData = in.getData(); + + if (inImgH == outImgW && inImgW == outImgW) { + this->copyFrom(in); + } else { + hl_bilinear_forward(inData, inImgH, inImgW, + inputH, inputW, outData, outImgH, outImgW, + outputH, outputW, numChannels); + } +} + +void GpuMatrix::bilinearBackward(const Matrix& out, + const size_t outImgH, + const size_t outImgW, + const size_t inImgH, + const size_t inImgW, + const size_t numChannels) { + CHECK(dynamic_cast(&out)); + + const size_t inputW = getWidth(); + const size_t inputH = getHeight(); + const size_t outputW = out.getWidth(); + const size_t outputH = out.getHeight(); + + real* inGrad = getData(); + const real* outGrad = out.getData(); + + if (outImgH == inImgH && outImgW == inImgW) { + this->copyFrom(out); + } else { + hl_bilinear_backward(inGrad, inImgH, inImgW, + inputH, inputW, outGrad, outImgH, outImgW, + outputH, outputW, numChannels); + } +} + /** * CpuMatrix */ @@ -3598,6 +3649,109 @@ void CpuMatrix::classificationErrorMulti(Matrix& output, Matrix& label, } } +void CpuMatrix::bilinearForward(const Matrix& in, + const size_t inImgH, + const size_t inImgW, + const size_t outImgH, + const size_t outImgW, + const size_t numChannels) { + CHECK(dynamic_cast(&in)); + + size_t outputW = getWidth(); + size_t outputH = getHeight(); + size_t inputW = in.getWidth(); + size_t inputH = in.getHeight(); + + real* outData = getData(); + const real* inData = in.getData(); + + const real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + const real ratioW = (outImgW > 1) ? + static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + + if (inImgH == outImgH && inImgW == outImgW) { + this->copyFrom(in); + } else { + for (int k = 0; k < outputH; ++k) { // loop for batches + for (int i = 0; i < outImgH; ++i) { // loop for images + int h = ratioH * i; + int hid = (h < inImgH - 1) ? 1 : 0; + real hlambda = ratioH * i - h; + + for (int j = 0; j < outImgW; ++j) { + int w = ratioW * j; + int wid = (w < inImgW - 1) ? 
1 : 0; + real wlambda = ratioW * j - w; + // calculate four position for bilinear interpolation + const real* inPos = &inData[k * inputW + h * inImgW + w]; + real* outPos = &outData[k * outputW + i * outImgW + j]; + for (int c = 0; c < numChannels; ++c) { // loop for channels + // bilinear interpolation + outPos[0] = (1.f - hlambda) * + ((1.f - wlambda) * inPos[0] + wlambda * inPos[wid]) + + hlambda * ((1.f - wlambda) * inPos[hid * inImgW] + + wlambda * inPos[hid * inImgW + wid]); + inPos += inImgH * inImgW; + outPos += outImgH * outImgW; + } + } + } + } + } +} + +void CpuMatrix::bilinearBackward(const Matrix& out, + const size_t outImgH, + const size_t outImgW, + const size_t inImgH, + const size_t inImgW, + const size_t numChannels) { + CHECK(dynamic_cast(&out)); + + size_t inputW = getWidth(); + size_t inputH = getHeight(); + size_t outputW = out.getWidth(); + size_t outputH = out.getHeight(); + + real* inGrad = getData(); + const real* outGrad = out.getData(); + + const real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + const real ratioW = (outImgW > 1) ? + static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + + if (inImgH == outImgH && inImgW == outImgW) { + this->copyFrom(out); + } else { + for (int k = 0; k < outputH; ++k) { // loop for batches + for (int i = 0; i < outImgH; ++i) { // loop for images + int h = ratioH * i; + int hid = (h < inImgH - 1) ? 1 : 0; + real hlambda = ratioH * i - h; + + for (int j = 0; j < outImgW; ++j) { + int w = ratioW * j; + int wid = (w < inImgW - 1) ? 1 : 0; + real wlambda = ratioW * j - w; + + real* inPos = &inGrad[k * inputW + h * inImgW + w]; + const real* outPos = &outGrad[k * outputW + i * outImgW + j]; + for (int c = 0; c < numChannels; ++c) { // loop for channels + inPos[0] += (1.f - hlambda) * (1.f - wlambda) * outPos[0]; + inPos[wid] += (1.f - hlambda) * wlambda * outPos[0]; + inPos[hid * inImgW] += hlambda * (1.f - wlambda) * outPos[0]; + inPos[hid * inImgW + wid] += hlambda * wlambda * outPos[0]; + inPos += inImgH * inImgW; + outPos += outImgH * outImgW; + } + } + } + } + } +} + //////////////////////////////////////////////////////////////// // functions executed via cpu // //////////////////////////////////////////////////////////////// diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 5c15c94012816..b4922d7e6f546 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -930,6 +930,22 @@ class Matrix : public BaseMatrix { virtual void paramReluBackwardDiff(Matrix& oGrad, Matrix& data, Matrix& W) { LOG(FATAL) << "Not implemented"; } + virtual void bilinearForward(const Matrix& in, + const size_t inImgH, + const size_t inImgW, + const size_t outImgH, + const size_t outImgW, + const size_t numChannels) { + LOG(FATAL) << "Not implemented"; + } + virtual void bilinearBackward(const Matrix& out, + const size_t outImgH, + const size_t outImgW, + const size_t inImgH, + const size_t inImgW, + const size_t numChannels) { + LOG(FATAL) << "Not implemented"; + } }; inline std::ostream& operator<<(std::ostream& os, const Matrix& mat) { @@ -1191,6 +1207,20 @@ class GpuMatrix : public Matrix { int contextLength, int contextStart, int totalPad, size_t beginPad); + + void bilinearForward(const Matrix& in, + const size_t inImgH, + const size_t inImgW, + const size_t outImgH, + const size_t outImgW, + const size_t numChannels); + + void bilinearBackward(const Matrix& out, + const size_t outImgH, + const size_t outImgW, + const size_t inImgH, + const size_t inImgW, + const size_t numChannels); }; class CpuMatrix : 
public Matrix { @@ -1469,6 +1499,20 @@ class CpuMatrix : public Matrix { void multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label); void multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label); void classificationErrorMulti(Matrix& output, Matrix& label, real threshold); + + void bilinearForward(const Matrix& in, + const size_t inImgH, + const size_t inImgW, + const size_t outImgH, + const size_t outImgW, + const size_t numChannels); + + void bilinearBackward(const Matrix& out, + const size_t outImgH, + const size_t outImgW, + const size_t inImgH, + const size_t inImgW, + const size_t numChannels); }; class SharedCpuMatrix : public CpuMatrix { diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index e1bda79a8acb1..2ff19e7b3f87c 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -88,6 +88,72 @@ void MatrixCheckErr(const Matrix& matrix1, const Matrix& matrix2) { EXPECT_EQ(count, 0) << "There are " << count << " different element."; } +void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, + int channels) { + int inWidth = imgSizeH * imgSizeW * channels; + int outWidth = 2 * imgSizeH * 2 * imgSizeW * channels; + + // forward + MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true); + + MatrixPtr target = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpu = GpuMatrix::create(numSamples, outWidth, false, true); + MatrixPtr targetCheck = CpuMatrix::create(numSamples, outWidth, false, false); + + input->randomizeUniform(); + inputGpu->copyFrom(*input); + + target->bilinearForward(*input, imgSizeH, imgSizeW, + 2 * imgSizeH, 2 * imgSizeW, channels); + targetGpu->bilinearForward(*inputGpu, imgSizeH, imgSizeW, + 2 * imgSizeH, 2 * imgSizeW, channels); + + // check + targetCheck->copyFrom(*targetGpu); + MatrixCheckErr(*target, *targetCheck); + + // backward + MatrixPtr inputGrad = CpuMatrix::create(numSamples, inWidth, false, false); + MatrixPtr inputGpuGrad = GpuMatrix::create(numSamples, inWidth, false, true); + + MatrixPtr targetGrad = CpuMatrix::create(numSamples, outWidth, false, false); + MatrixPtr targetGpuGrad = GpuMatrix::create(numSamples, outWidth, false, + true); + MatrixPtr targetCheckGrad = + CpuMatrix::create(numSamples, inWidth, false, false); + + inputGrad->randomizeUniform(); + targetGrad->randomizeUniform(); + inputGpuGrad->copyFrom(*inputGrad); + targetGpuGrad->copyFrom(*targetGrad); + + inputGrad->bilinearBackward(*targetGrad, 2 * imgSizeH, 2 * imgSizeW, + imgSizeH, imgSizeW, channels); + inputGpuGrad->bilinearBackward(*targetGpuGrad, 2 * imgSizeH, 2 * imgSizeW, + imgSizeH, imgSizeW, channels); + + // check + targetCheckGrad->copyFrom(*inputGpuGrad); + MatrixCheckErr(*inputGrad, *targetCheckGrad); +} + +TEST(Matrix, BilinearFwdBwd) { + for (auto numSamples : {5, 10}) { + for (auto channels : {8, 16}) { + for (auto imgSizeH : {14, 28}) { + for (auto imgSizeW : {16, 30}) { + VLOG(3) << " numSamples=" << numSamples + << " channels=" << channels + << " imgSizeH=" << imgSizeH + << " imgSizeW=" << imgSizeW; + testBilinearFwdBwd(numSamples, imgSizeH, imgSizeW, channels); + } + } + } + } +} + void testMatrixProjectionForward(int contextStart, int contextLength, bool padding, int batchSize, int inputDim) { MatrixPtr cpuInput = std::make_shared(batchSize, inputDim); diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 
25e36f9c4c168..8bdcd70a417b8 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -203,6 +203,15 @@ message OperatorConfig { optional int32 num_filters = 7; } +message BilinearInterpConfig { + // The size if input feature map. + required uint32 img_size_x = 1; + required uint32 img_size_y = 2; + // The size if output feature map. + required uint32 out_size_x = 3; + required uint32 out_size_y = 4; + required uint32 num_channels = 5; +} message ImageConfig { // The image data dimensionality. @@ -225,6 +234,7 @@ message LayerInputConfig { // If the input layer has multi-output. // Set the argument name. optional string input_layer_argument = 9; + optional BilinearInterpConfig bilinear_interp_conf = 10; } message LayerConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index fb47fd0c6f0c3..82446e980d81c 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -461,6 +461,7 @@ def __init__( sparse_update=None, gradient_clipping_threshold=None, conv=None, + bilinear_interp=None, norm=None, pool=None, image=None, @@ -723,6 +724,18 @@ def __init__( if output_x is not None: config_assert(output_x <= 0) +# please refer to the comments in proto/ModelConfig.proto +@config_class +class BilinearInterp(Cfg): + def __init__( + self, + img_size_x = None, + img_size_y=None, + out_size_x = None, + out_size_y = None, + num_channels = None): + self.add_keys(locals()) + # please refer to the comments in proto/ModelConfig.proto @config_class class Pool(Cfg): @@ -953,6 +966,13 @@ def TestData(data_config, async_load_data=None): " Data definition") g_config.test_data_config.async_load_data = async_load_data +def parse_bilinear(bilinear, input_layer_name, bilinear_conf): + bilinear_conf.img_size_x = bilinear.img_size_x; + bilinear_conf.img_size_y = bilinear.img_size_y; + bilinear_conf.out_size_x = bilinear.out_size_x; + bilinear_conf.out_size_y = bilinear.out_size_y; + bilinear_conf.num_channels = bilinear.num_channels; + def parse_pool(pool, input_layer_name, pool_conf): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in ['max-projection', 'avg-projection', @@ -2306,6 +2326,21 @@ def __init__( config_assert(input_layer1.size == input_layer2.size, 'the two vector inputs should be of the same size') +@config_layer('bilinear_interp') +class BilinearInterpLayer(LayerBase): + def __init__( + self, + name, + inputs, + device=None): + super(BilinearInterpLayer, self).__init__( + name, 'bilinear_interp', 0, inputs=inputs, device=device) + input_layer = self.get_input_layer(0) + self.set_layer_size(input_layer.size) + parse_bilinear(self.inputs[0].bilinear_interp, + input_layer.name, + self.config.inputs[0].bilinear_interp_conf); + @config_layer('sum_to_one_norm') class SumToOneNormLayer(LayerBase): def __init__( diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 5e7e66a908ee0..59df4646faae9 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -40,8 +40,8 @@ 'img_cmrnorm_layer', 'addto_layer', 'concat_layer', 'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput', 'expand_layer', 'scaling_layer', - 'power_layer', 'interpolation_layer', 'trans_layer', - 'sum_to_one_norm_layer', + 'power_layer', 'interpolation_layer', 'bilinear_interp_layer', + 'trans_layer', 'sum_to_one_norm_layer', 'get_output_layer', 'LayerType', 'context_projection', 'beam_search', 
'maxid_layer', 'GeneratedInput', 'SubsequenceInput', 'gru_step_layer', 'recurrent_layer', @@ -92,6 +92,7 @@ class LayerType(object): EXPAND_LAYER = 'expand' INTERPOLATION_LAYER = 'interpolation' + BILINEAR_INTERP_LAYER = 'bilinear_interp' POWER_LAYER = 'power' SCALING_LAYER = 'scaling' TRANS_LAYER = 'trans' @@ -1252,6 +1253,70 @@ def interpolation_layer(input, weight, name=None, layer_attr=None): size=input[0].size) +@wrap_name_default() +@layer_support() +def bilinear_interp_layer(input, + img_size_x=None, + img_size_y=None, + out_size_x=None, + out_size_y=None, + num_channels=None, + name=None, + layer_attr=None): + """ + This layer is to implement bilinear interpolation on conv layer output. + + Please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation + + The simple usage is: + + .. code-block:: python + + bilinear = bilinear_interp_layer(input, + img_size_x, + img_size_y, + out_size_x, + out_size_y, + num_channels) + + :para input: A input layer. + :type input: LayerOutput. + :para img_size_x: previous layer output width. + :type img_size_x: int|None + :para img_size_y: previous layer output height. + :type img_size_y: int|None + :para out_size_x: bilinear interpolation output width. + :type out_size_x: int|None + :para out_size_y: bilinear interpolation output height. + :type out_size_y: int|None + :para num_channels: number of channels of input layer. If None, + it will be set automatically from previous output. + :type num_channels: int|None + :para name: The layer's name, which cna not be specified. + :type name: None|basestring + :para layer_attr: Extra Layer attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. + :rtype: LayerOutput + """ + assert input.layer_type == LayerType.CONV_LAYER + assert isinstance(input.activation, LinearActivation) + assert img_size_x > 0 and img_size_y > 0 + assert out_size_x > 0 and out_size_y > 0 + if num_channels is None: + assert input.numfilters is not None + num_channels = input.num_filters + Layer(name=name, + inputs=Input(input.name, + bilinear_interp=BilinearInterp(img_size_x=img_size_x, + img_size_y=img_size_y, + out_size_x=out_size_x, + out_size_y=out_size_y, + num_channels=num_channels)), + type=LayerType.BILINEAR_INTERP_LAYER, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input]) + @wrap_name_default() @layer_support() def power_layer(input, weight, name=None, layer_attr=None): diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index fc2acbd41ed90..e8be0023e7013 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -8,7 +8,7 @@ configs=(test_fc layer_activations projections test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers -test_rnn_group) +test_rnn_group test_bilinear_interp) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py new file mode 100644 index 0000000000000..7815b34abcc25 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py @@ -0,0 +1,33 @@ +from 
paddle.trainer_config_helpers import * + +settings( + batch_size=1000, + learning_rate=1e-5 +) + +data = data_layer(name='data', size=2304) + +conv = img_conv_layer(input=data, + filter_size = 3, + num_channels=1, + num_filters=16, + padding=1, + act=LinearActivation(), + bias_attr=True) + +bilinear = bilinear_interp_layer(input=conv, + img_size_x=32, + img_size_y=32, + out_size_x=64, + out_size_y=64, + num_channels=16) + +pool = img_pool_layer(input=bilinear, + num_channels=4, + pool_size=2, + stride=2, + pool_type=MaxPooling()) + +fc = fc_layer(input=pool, size=384, bias_attr=False) + +outputs(fc) \ No newline at end of file From 460320a41c24c6f1150bd4d9a9e499474ca28935 Mon Sep 17 00:00:00 2001 From: liaogang Date: Sun, 30 Oct 2016 10:13:09 +0800 Subject: [PATCH 055/180] fix type unmatch on gcc --- paddle/math/Matrix.cpp | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 602d7db035deb..ce4d2ac39938f 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -3786,6 +3786,7 @@ void CpuMatrix::bilinearForward(const Matrix& in, size_t outputH = getHeight(); size_t inputW = in.getWidth(); size_t inputH = in.getHeight(); + (void)(inputH); real* outData = getData(); const real* inData = in.getData(); @@ -3798,20 +3799,20 @@ void CpuMatrix::bilinearForward(const Matrix& in, if (inImgH == outImgH && inImgW == outImgW) { this->copyFrom(in); } else { - for (int k = 0; k < outputH; ++k) { // loop for batches - for (int i = 0; i < outImgH; ++i) { // loop for images - int h = ratioH * i; - int hid = (h < inImgH - 1) ? 1 : 0; + for (size_t k = 0; k < outputH; ++k) { // loop for batches + for (size_t i = 0; i < outImgH; ++i) { // loop for images + size_t h = ratioH * i; + size_t hid = (h < inImgH - 1) ? 1 : 0; real hlambda = ratioH * i - h; - for (int j = 0; j < outImgW; ++j) { - int w = ratioW * j; - int wid = (w < inImgW - 1) ? 1 : 0; + for (size_t j = 0; j < outImgW; ++j) { + size_t w = ratioW * j; + size_t wid = (w < inImgW - 1) ? 1 : 0; real wlambda = ratioW * j - w; // calculate four position for bilinear interpolation const real* inPos = &inData[k * inputW + h * inImgW + w]; real* outPos = &outData[k * outputW + i * outImgW + j]; - for (int c = 0; c < numChannels; ++c) { // loop for channels + for (size_t c = 0; c < numChannels; ++c) { // loop for channels // bilinear interpolation outPos[0] = (1.f - hlambda) * ((1.f - wlambda) * inPos[0] + wlambda * inPos[wid]) + @@ -3838,6 +3839,7 @@ void CpuMatrix::bilinearBackward(const Matrix& out, size_t inputH = getHeight(); size_t outputW = out.getWidth(); size_t outputH = out.getHeight(); + (void)(inputH); real* inGrad = getData(); const real* outGrad = out.getData(); @@ -3850,20 +3852,20 @@ void CpuMatrix::bilinearBackward(const Matrix& out, if (inImgH == outImgH && inImgW == outImgW) { this->copyFrom(out); } else { - for (int k = 0; k < outputH; ++k) { // loop for batches - for (int i = 0; i < outImgH; ++i) { // loop for images - int h = ratioH * i; - int hid = (h < inImgH - 1) ? 1 : 0; + for (size_t k = 0; k < outputH; ++k) { // loop for batches + for (size_t i = 0; i < outImgH; ++i) { // loop for images + size_t h = ratioH * i; + size_t hid = (h < inImgH - 1) ? 1 : 0; real hlambda = ratioH * i - h; - for (int j = 0; j < outImgW; ++j) { - int w = ratioW * j; - int wid = (w < inImgW - 1) ? 1 : 0; + for (size_t j = 0; j < outImgW; ++j) { + size_t w = ratioW * j; + size_t wid = (w < inImgW - 1) ? 
1 : 0; real wlambda = ratioW * j - w; real* inPos = &inGrad[k * inputW + h * inImgW + w]; const real* outPos = &outGrad[k * outputW + i * outImgW + j]; - for (int c = 0; c < numChannels; ++c) { // loop for channels + for (size_t c = 0; c < numChannels; ++c) { // loop for channels inPos[0] += (1.f - hlambda) * (1.f - wlambda) * outPos[0]; inPos[wid] += (1.f - hlambda) * wlambda * outPos[0]; inPos[hid * inImgW] += hlambda * (1.f - wlambda) * outPos[0]; From 212d3391029b49526eb617aa30c49119034c6724 Mon Sep 17 00:00:00 2001 From: zhouxiao-coder Date: Mon, 31 Oct 2016 16:36:17 +0800 Subject: [PATCH 056/180] Adding an introduction doc for Paddle to implement simplest linear regression. --- demo/introduction/README.md | 4 ++ demo/introduction/dataprovider.py | 24 +++++++ demo/introduction/evaluate_model.py | 36 ++++++++++ demo/introduction/train.sh | 21 ++++++ demo/introduction/trainer_config.py | 32 +++++++++ doc/index.md | 1 + doc/introduction/index.md | 101 ++++++++++++++++++++++++++ doc/introduction/parameters.png | 1 + doc_cn/index.rst | 2 +- doc_cn/introduction/index.md | 105 ++++++++++++++++++++++++++++ doc_cn/introduction/parameters.png | Bin 0 -> 44469 bytes 11 files changed, 326 insertions(+), 1 deletion(-) create mode 100644 demo/introduction/README.md create mode 100644 demo/introduction/dataprovider.py create mode 100755 demo/introduction/evaluate_model.py create mode 100755 demo/introduction/train.sh create mode 100644 demo/introduction/trainer_config.py create mode 100644 doc/introduction/index.md create mode 120000 doc/introduction/parameters.png create mode 100644 doc_cn/introduction/index.md create mode 100644 doc_cn/introduction/parameters.png diff --git a/demo/introduction/README.md b/demo/introduction/README.md new file mode 100644 index 0000000000000..bebf1d090d986 --- /dev/null +++ b/demo/introduction/README.md @@ -0,0 +1,4 @@ +This folder contains scripts used in PaddlePaddle introduction. +- use `bash train.sh` to train a simple linear regression model +- use `python evaluate_model.py` to read model parameters. You can see that `w` and `b` are very close to [2, 0.3]. + diff --git a/demo/introduction/dataprovider.py b/demo/introduction/dataprovider.py new file mode 100644 index 0000000000000..be8c0bc89156c --- /dev/null +++ b/demo/introduction/dataprovider.py @@ -0,0 +1,24 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer.PyDataProvider2 import * +import random + +# define data types of input: 2 real numbers +@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) +def process(settings, input_file): + for i in xrange(2000): + x = random.random() + yield [x], [2*x+0.3] + diff --git a/demo/introduction/evaluate_model.py b/demo/introduction/evaluate_model.py new file mode 100755 index 0000000000000..8cfb843c42105 --- /dev/null +++ b/demo/introduction/evaluate_model.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# -*- coding: UTF-8 -*- + +# Copyright (c) 2016 Baidu, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Print model parameters in last model + +Usage: + python evaluate_model.py +""" +import numpy as np +import os + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +def main(): + print 'w=%.6f, b=%.6f from pass 29' % (load('output/pass-00029/w'), + load('output/pass-00029/b')) + +if __name__ == '__main__': + main() diff --git a/demo/introduction/train.sh b/demo/introduction/train.sh new file mode 100755 index 0000000000000..06db8edd105ad --- /dev/null +++ b/demo/introduction/train.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +set -e + +paddle train \ + --config=trainer_config.py \ + --save_dir=./output \ + --num_passes=30 \ + 2>&1 |tee 'train.log' diff --git a/demo/introduction/trainer_config.py b/demo/introduction/trainer_config.py new file mode 100644 index 0000000000000..3e3df5583282a --- /dev/null +++ b/demo/introduction/trainer_config.py @@ -0,0 +1,32 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +# 1. read data. Suppose you saved above python code as dataprovider.py +data_file = 'empty.list' +with open(data_file, 'w') as f: f.writelines(' ') +define_py_data_sources2(train_list=data_file, test_list=None, + module='dataprovider', obj='process',args={}) + +# 2. learning algorithm +settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) + +# 3. 
Network configuration +x = data_layer(name='x', size=1) +y = data_layer(name='y', size=1) +y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) +cost = regression_cost(input=y_predict, label=y) +outputs(cost) + diff --git a/doc/index.md b/doc/index.md index df03a33fac98c..a4dffb0405a6b 100644 --- a/doc/index.md +++ b/doc/index.md @@ -3,6 +3,7 @@ PaddlePaddle Documentation User Guide ---------- +* [Introduction](introduction/index.md) * [Quick Start](demo/quick_start/index_en.md) * [Build and Installation](build/index.rst) * [Contribute Code](build/contribute_to_paddle.md) diff --git a/doc/introduction/index.md b/doc/introduction/index.md new file mode 100644 index 0000000000000..004ca07844da0 --- /dev/null +++ b/doc/introduction/index.md @@ -0,0 +1,101 @@ +# Introduction + +PaddlePaddle is a deep learning platform open-sourced by Baidu. With PaddlePaddle, you can easily train a classic neural network within a couple lines of configuration, or you can build sophisticated models that provide state-of-the-art performance on difficult learning tasks like sentiment analysis, machine translation, image caption and so on. + +## 1. A Classic Problem + +Now, to give you a hint of what using PaddlePaddle looks like, let's start with a fundamental learning problem - **simple linear regression** : you have observed a set of two-dimensional data points of `X` and `Y`, where `X` is an explanatory variable and `Y` is corresponding dependent variable, and you want to recover the underlying correlation between `X` and `Y`. Linear regression can be used in many practical scenarios. For example, `X` can be a variable about house size, and `Y` a variable about house price. You can build a model that captures relationship between them by observing real estate markets. + +## 2. Prepare the Data + +Suppose the true relationship can be characterized as `Y = 2X + 0.3`, let's see how to recover this pattern only from observed data. Here is a piece of python code that feeds synthetic data to PaddlePaddle. The code is pretty self-explanatory, the only extra thing you need to add for PaddlePaddle is a definition of input data types. + +```python +# dataprovider.py +from paddle.trainer.PyDataProvider2 import * +import random + +# define data types of input: 2 real numbers +@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) +def process(settings, input_file): + for i in xrange(2000): + x = random.random() + yield [x], [2*x+0.3] +``` + +## 3. Train a NeuralNetwork in PaddlePaddle + +To recover this relationship between `X` and `Y`, we use a neural network with one layer of linear activation units and a square error cost layer. Don't worry if you are not familiar with these terminologies, it's just saying that we are starting from a random line `Y' = wX + b` , then we gradually adapt `w` and `b` to minimize the difference between `Y'` and `Y`. Here is what it looks like in PaddlePaddle: + +```python +# trainer_config.py +from paddle.trainer_config_helpers import * + +# 1. read data. Suppose you saved above python code as dataprovider.py +data_file = 'empty.list' +with open(data_file, 'w') as f: f.writelines(' ') +define_py_data_sources2(train_list=data_file, test_list=None, + module='dataprovider', obj='process',args={}) + +# 2. learning algorithm +settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) + +# 3. 
Network configuration +x = data_layer(name='x', size=1) +y = data_layer(name='y', size=1) +y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) +cost = regression_cost(input=y_predict, label=y) +outputs(cost) +``` + +Some of the most fundamental usages of PaddlePaddle are demonstrated: + +- The first part shows how to feed data into PaddlePaddle. In general cases, PaddlePaddle reads raw data from a list of files, and then do some user-defined process to get real input. In this case, we only need to create a placeholder file since we are generating synthetic data on the fly. + +- The second part describes learning algorithm. It defines in what ways adjustments are made to model parameters. PaddlePaddle provides a rich set of optimizers, but a simple momentum based optimizer will suffice here, and it processes 12 data points each time. + +- Finally, the network configuration. It usually is as simple as "stacking" layers. Three kinds of layers are used in this configuration: + - **Data Layer**: a network always starts with one or more data layers. They provide input data to the rest of the network. In this problem, two data layers are used respectively for `X` and `Y`. + - **FC Layer**: FC layer is short for Fully Connected Layer, which connects all the input units to current layer and does the actual computation specified as activation function. Computation layers like this are the fundamental building blocks of a deeper model. + - **Cost Layer**: in training phase, cost layers are usually the last layers of the network. They measure the performance of current model, and provide guidence to adjust parameters. + +Now that everything is ready, you can train the network with a simple command line call: + ``` + paddle train --config=trainer_config.py --save_dir=./output --num_passes=30 + ``` + +This means that PaddlePaddle will train this network on the synthectic dataset for 30 passes, and save all the models under path `./output`. You will see from the messages printed out during training phase that the model cost is decreasing as time goes by, which indicates we are getting a closer guess. + + +## 4. Evaluate the Model + +Usually, a different dataset that left out during training phase should be used to evalute the models. However, we are lucky enough to know the real answer: `w=2, b=0.3`, thus a better option is to check out model parameters directly. + +In PaddlePaddle, training is just to get a collection of model parameters, which are `w` and `b` in this case. Each parameter is saved in an individual file in the popular `numpy` array format. Here is the code that reads parameters from last pass. + +```python +import numpy as np +import os + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +print 'w=%.6f, b=%.6f' % (load('output/pass-00029/w'), load('output/pass-00029/b')) +# w=1.999743, b=0.300137 +``` + +
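If you would like a quick numerical sanity check in addition to the plot below, a minimal sketch (reusing the `load` helper defined above and the default `output/pass-00029` layout; the exact pass number depends on `--num_passes`) could look like this:

```python
import numpy as np

# Learned parameters (single-element float32 arrays read by `load`).
w = load('output/pass-00029/w')[0]
b = load('output/pass-00029/b')[0]

# Compare the learned line w*x + b against the ground truth 2*x + 0.3
# on a few sample points; the maximum error should be close to zero.
x = np.linspace(0.0, 1.0, 5)
max_err = np.max(np.abs((2 * x + 0.3) - (w * x + b)))
print 'max error on [0, 1]: %.6f' % max_err
```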
![](./parameters.png)
+ +Although starts from a random guess, you can see that value of `w` changes quickly towards 2 and `b` changes quickly towards 0.3. In the end, the predicted line is almost identical with real answer. + +There, you have recovered the underlying pattern between `X` and `Y` only from observed data. + + +## 5. Where to Go from Here + +- Build and Installation +- Quick Start +- Example and Demo + diff --git a/doc/introduction/parameters.png b/doc/introduction/parameters.png new file mode 120000 index 0000000000000..f47e74c94fffa --- /dev/null +++ b/doc/introduction/parameters.png @@ -0,0 +1 @@ +../../doc_cn/introduction/parameters.png \ No newline at end of file diff --git a/doc_cn/index.rst b/doc_cn/index.rst index d2d50fbdb47f2..715da44fb41d4 100644 --- a/doc_cn/index.rst +++ b/doc_cn/index.rst @@ -3,7 +3,7 @@ PaddlePaddle文档 使用指南 -------- - +* `介绍 `_ * `快速入门 `_ * `编译与安装 `_ * `用户接口 `_ diff --git a/doc_cn/introduction/index.md b/doc_cn/introduction/index.md new file mode 100644 index 0000000000000..164cb7d4943df --- /dev/null +++ b/doc_cn/introduction/index.md @@ -0,0 +1,105 @@ +# 简介 + +PaddlePaddle 是起源于百度的开源深度学习平台。它是简单易用的:你可以通过简单的十数行配置搭建经典的神经网络模型;它也是高效强大的:PaddlePaddle可以支撑复杂集群环境下超大模型的训练,令你受益于深度学习的前沿成果。在百度内部,已经有大量产品线使用了基于PaddlePaddle的深度学习技术。 + +这份简短的介绍将像你展示如何利用PaddlePaddle解决一个经典的学习问题。 + +## 1. 一个经典的任务 + +让我们从一个基础问题开始:单变量的线性回归。问题假定观测到了一批二维空间上的点`(x, y) `,并且已知 `x` 和 `y` 之间存在着某种线性关系,我们的目标是通过观测数据还原这个线性关系。作为一个简单基础的模型,线性回归却有着广泛的应用场景。比如可以想象一个资产定价的简化场景,其中 `x` 对应于房屋的大小,`y` 对应于房屋价格。我们可以通过观察市场上房屋的情况获得二者之间的关系,从而为新房屋的定价提供参考。 + + +## 2. 准备数据 + +假设变量 `X` 和 `Y` 的真实关系为: `Y = 2X + 0.3`,这里展示如何使用观测数据还原这一线性关系。如下Python代码将随机产生2000个观测点,它们将被用作PaddlePaddle的输入。产生PaddlePaddle的输入数据和写一段普通的Python脚本几乎一样,你唯一需要增加的就是定义输入数据的类型。 + +```python +# -*- coding:utf-8 -*- +# dataprovider.py +from paddle.trainer.PyDataProvider2 import * +import random + +# 定义输入数据的类型: 2个浮点数 +@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) +def process(settings, input_file): + for i in xrange(2000): + x = random.random() + yield [x], [2*x+0.3] +``` + +## 3. 训练模型 + +为了还原 `Y = 2X + 0.3`,我们先从一条随机的直线 `Y' = wX + b` 开始,然后利用观测数据调整 `w` 和 `b` 使得 `Y'` 和 `Y` 的差距不断减小,最终趋于相同。这个过程就是模型的训练过程,而 `w` 和 `b` 就是模型的参数,即我们的训练目标。 + +在PaddlePaddle里,该模型的网络配置如下。 + +```python +# -*- coding:utf-8 -*- +# trainer_config.py +from paddle.trainer_config_helpers import * + +# 1. 定义数据来源,调用上面的process函数获得观测数据 +data_file = 'empty.list' +with open(data_file, 'w') as f: f.writelines(' ') +define_py_data_sources2(train_list=data_file, test_list=None, + module='dataprovider', obj='process',args={}) + +# 2. 学习算法。控制如何改变模型参数 w 和 b +settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) + +# 3. 
神经网络配置 +x = data_layer(name='x', size=1) +y = data_layer(name='y', size=1) +# 线性计算单元: y_predict = wx + b +y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) +# 损失计算,度量 y_predict 和真实 y 之间的差距 +cost = regression_cost(input=y_predict, label=y) +outputs(cost) +``` +这段简短的配置展示了PaddlePaddle的基本用法: + +- 首先,第一部分定义了数据输入。一般情况下,PaddlePaddle先从一个文件列表里获得数据文件地址,然后交给用户自定义的函数(例如上面的`process`函数)进行读入和预处理从而得到真实输入。本文中由于输入数据是随机生成的不需要读输入文件,所以放一个空列表(`empty.list`)即可。 + +- 第二部分主要是选择学习算法,它定义了模型参数如何改变。PaddlePaddle提供了很多优秀的学习算法,但这里使用一个简单的基于momentum的算法就足够了,它每次读取12个数据进行计算和模型更新。 + +- 最后一部分是神经网络的配置。由于PaddlePaddle已经实现了丰富的网络单元(Layer),所以很多时候你需要做的只是声明正确的网络单元并把它们拼接起来。这里使用了三种网络单元: + - **数据层**:数据层 `data_layer` 是神经网络的入口,它读入数据并将它们传输到下游的其它单元。这里数据层有两个,分别对应于变量 `X` 和 `Y`。 + - **全连接层**:全连接层 `fc_layer` 是基础的计算单元,这里利用它建模变量之间的线性关系。计算单元是神经网络的核心,PaddlePaddle支持大量的计算单元和任意深度的网络连接,从而可以挖掘复杂的数据关系。 + - **回归损失层**:回归损失层 `regression_cost`是众多损失函数层的一种,它们在训练过程作为网络的出口,用来计算模型的表现,并指导模型参数的改变。 + +这样定义了网络结构并保存为`trainer_config.py`之后,运行训练命令即可: + ``` + paddle train --config=trainer_config.py --save_dir=./output --num_passes=30 + ``` + +PaddlePaddle将在观测数据集上迭代训练30轮,并将每轮的模型结果存放在 `./output` 路径下。从输出日志可以看到,随着轮数增加损失函数的输出在不断的减小,这意味着模型在不断的改进,直到逼近真实解:` Y = 2X + 0.3 ` + +## 4. 模型检验 + +训练完成后,我们希望能够检验模型的好坏。一种常用的做法是用模型对另外一组数据进行预测,然后评价预测的效果。但在这个例子中,由于已经知道了真实答案,我们可以直接观察模型的参数是否符合预期来进行检验。 + +PaddlePaddle将每个模型参数作为一个numpy数组单独存为一个文件,所以可以利用如下方法读取模型的参数。 + +```python +import numpy as np +import os + +def load(file_name): + with open(file_name, 'rb') as f: + f.read(16) # skip header for float type. + return np.fromfile(f, dtype=np.float32) + +print 'w=%.6f, b=%.6f' % (load('output/pass-00029/w'), load('output/pass-00029/b')) +# w=1.999743, b=0.300137 +``` +
![](./parameters.png)
+ +从图中可以看到,虽然 `w` 和 `b` 都使用随机值初始化,但在起初的几轮训练中它们都在快速逼近真实值,并且后续仍在不断改进,使得最终得到的模型几乎与真实模型重合。 + +这样,我们就完成了对单变量线性回归问题的解决:将数据输入PaddlePaddle,训练模型,最后验证结果。 + +## 5. 推荐后续阅读 + +- 安装/编译:PaddlePaddle的安装与编译文档。 +- 快速入门 :使用商品评论分类任务,系统性的介绍如何一步步改进,最终得到产品级的深度模型。 +- 示例:各种实用案例,涵盖图像、文本、推荐等多个领域。 diff --git a/doc_cn/introduction/parameters.png b/doc_cn/introduction/parameters.png new file mode 100644 index 0000000000000000000000000000000000000000..2ec67480951e21f0400bce1c34b3108dcd65c18c GIT binary patch literal 44469 zcmeGEWmr{P_dgE9ra`)-5u_XGMp9|%25CWR)1A^F2D#}}IwUvU-67rG-TW6v&;32W z_s`46>*A8V_KG>@nsba#j7gZXq6|7JF)9oU4Eh^c$#*a?AU_xw*b-z!;7H%1sXp)@ zEcl(wYnb9e(rw@aioL8h7zPH@_~{2WUo_7OI6>P=P0LA3L0-Vb?jx(Qsoi@sR=1D# zz}YY`LT&=UuOH2vj49nd+Sq~x+=QwAIYR*W{pm0p73DvtI9Ur*X(=dEO4vD?QF60# zv9eQ%pi)v&3OSmZ3%rw*{(Cv_Ntnvg$;n=Tjm_27mDTkXtDU0-8wWo>KN~wI8z(0V za0Uz5-PXz2jl~vB{pTkC-bd05Y~pBT?__0XOZjwPRgWXF(@={Zu=}dbg3wj~8)$ry}Aqnrt zJ4x*N%z|mNjsu;kasnq7>yr9u4N>j-_j$+x$N``L*~13J*9wV;-h*ojwx8Z0f8p$b z(lyntL%>Sci-NU7(7fxr6{A7q=djd9@R5Gl>>z2%_sEh`@(M+n(#`)l9PNi9=85&^ zjDL<}8NyyDVz;>%V1+hlc*!|yY^9Q}aDkErKTQe=xtHohpUv)2T`(Hq%8YeMN1=9w{p`nT@6dg;|i$1#-@^ZBlY@m}ua-7c}GdX`L_L64!! z2EOFhb#<5+m#Ij3McW>$`4pEsT>HwNog>y>AuRcz!~dV#+(3Td_(w;2v6P_~d!Hz_$P; z>e_`GpA9jff8r<>u!Ck@Tk9DVbj@vUa4B3MZN@y#_5LuHSBh!y9Oz2sZQF+W5VMVo z@HHr}4>3gFAmtT2%#B%TpCx$OL9*6=EH7Adv6L7p9zTy)?8Am)2AKyVrCHx8TfyIM z|5~UUvs-NR=DS||s{iQ2QqdU|8NRvP&Jjo;(*tM}98W@wW| zx*@Fkb$lZwLn>v3-|z)AL3p=F7^Ka>OpZ(y_B4OM>OZGKW1fV+@-~maYFfM#FL|G# zrP+&0@df6P8=dD6dWw<6^O3j=PKf#)%t=`K$)>2g&PCP7*-zQoFTXKq4(?m6zmb&{ z%aV?o6Z^a~QDE|Lcb%_WU5p##W$(r!C0?LW#O`%vuU4qVu@-2aN)rNe8|t{`mZzBV zRxziLmLF&75+aIo4L71^ie*%wlqTX$l~t}GS7^{6pxg8)D}mzxQXYW5vKVZy7!uhv z*Z^Su7u=s_190EsHk2pM}*Yi>g4Yp~j@^0+6YrW}$gsD!8P}A8R)5C8!dM_gjmY6P~Y2iQMSCa5-z`5wG3C974jh=oh6o zXUkrtHn5o$hps(!ZFn9Fu9fqKlq>M*BBMES@q?tx1wLM4eDsoJL)W95A&L}~=+)k) zSi`<}H_X#uETYR4&`}VsVrXQq4htEq%WY4@s|b4o`d}r}lB0^pMeR$P_Xw2ON8-wP zn4vcaX|GaIVH-LxV&62U26opTUX+JIJO`3CoSG)mio{pDVq6tED)Dx1smkf`n`X-U z25UD{cXiIg4Y$#X+p$?-BpSK{?*oR=4m7lSfh-$D0(<)qGE2rVeL}kLQ=(r_(DZP_ zH2pEuK>>zf@4h=`oImr{_whE3WVpvxpK;@qdVeKVS`w{!#pkr}?oXOlc7vxUpZR4S z!t-$k&mcE$%mH_6y3E9!SO)hx5u17m$m{TPrUjXQ^707WpnuZ=dmZ zw|oSyWs-5E%^jYdK95ez&Ag%qDn8l9Aa+rD9pXRFz`};74_A90jD!0OZ9PqsFUiGV zl~7WM48>dMoIcd?{x?^f|8^RSr?`OyZX*6meYx2`iQp}>zmc(c9}ywGGWL}PLJ($hN`QQmxOp(k1^jn{HJL^&au2dggS<`R1N7Ch8*8ES(@sI1gk0>MBxU0T zHzABhiV{(e zChuF9IiK78=~{4}T+BmK{AssYR;%=7WUsY*aoSSlxZ&!DlXycP{>c)?v_pBvZ7vfp>swnb)6(uaJwGwVN&q-=zsnLj)Fkr?V+2L&9e2Y8is%1A0I1-(T3iL{)YY?{aXW{YqR)+(;?9qAUc|b$3sn}#m@!>4{P!t zA1<3xLNoY&0!cSIEBY6y4bKK=F1lGx=Jr^gNqZ=E4Um7?&3f_XJa&t0^*vFuLMVKm z@6T4dX72XTrM1V1J|Gv{cfjIcid?1Fry`_xqtRq)b#{?^_Uq@Ny%aj>re^h?eC#B# zlLS(FOOD3oP+ANcYYmJ^XQab>$C*ki)<@vc%d0V>ol}OsQ5r=$%U|SJVf$LkMbF1o zwJI&K7ragt=zO$YCIijPwGW#feX6zEPS*R5`(F*E%fyfm&h*mgCKBup;x{@;v8}JJ zQ}hmzkgIfo+U7WLKC}7PzxL_5TK7M*o2l$cDR|_dwHWz>k0>KiwjM9sPdBd9d~UXF zXzrIoD5eRvMc$*FX#yemRPR2J7#07nr0Jo~cPLfZ zM|*XW?RD>BjvQf=7WF(08UgcBn)k(TRz3Iv-@Z5&AM6Cyy=1$_V?*Ej^TaoXP9ste zK(wj@W7j4=2rWR}!5hw0D|b`+t|@31sS3$F=MkUJBEAoH6zJDi`7XBx`;^Vq3iObM zv$FQpTTijN_V4z5j_xx3<#GN&(-zyu2fO+K2({OOPFqWaefbexM~&XMbwXP$UKHME zDw@2LhP7J{Sur9fzQ-Me2Ri4g20?j`08Z)KI8>xEH6ajlpXn$1%QW~jK$*C6bthEe z_J<9ZbsM@mot8vG_h(}--XDc-Oe_f{AjXs7#lm4T&>J>Fcj4#*5UD~T!O;%UlTJO( z1tAz@NwrQ4EVdAM_nq+{#m}gQk6Mr@mNt^@q%RNV^?j)B^oSC9Z|@vnB9Pxq36cDS z*o+b^hdQiw);ikGR-uD82Row41oZ^ef2#S1%DjB}vIg@Or`+&m5qOWJXID{hpUd8$ z5y)lr%zPzO$xv-O9at-LQ}^!Uc3dT=^+axKg2_T{xC5*8td1{}3X1*u4Ia+%yr=2~ zruM%ZHrqUZTeFc3rL!g#}52U7&=xq1o!Y;&NKD=t0{z~_FI$wD~*hNevC 
zs^4or^-T8Xt~EEFXnaB>)u`Ho{MqY)8wHNR@$bU9rxXZ80iiVL#W{Yh=3I-`mVDr zMo@IT2#SuH*NW20h-jU`DgS`;)O81>xk*`l`svBSX6;0r&*TLY-;HL$AYt`Z%j7a9 z$6s$F+8*83|MZbgC-yp5v1t0Q64@f+B5i~45hlUL6?y)S&V7VhiZ70BB3Y_|}5hMq{)ALBo~qQ6!RvH(l=X7|}x#%_Au8!-**UPV2F ze%F*f9H=J-^vLuS-w@Pqg+dx69+T}?Iz|k(W+(9yl|4ys+U>~W$hbRjZgEh8YEr5~ zl-D|V#6G4`<@b|7cA?Scouvl=6}4q{=q_HoL{w^XDpiUkRz71lkM1efNQRqrvgOAH zALrw@WjnZ-2!z51mj-zb=w3s!;j~m1rF4xHcT1{y?D=mE><>)kST*li(h8#X`3*(Y zTcKuRaUC4HJlR@!qSUcU5!~ktqtU-g?jQ*S-!;lDNQlx9VSwK#sl_f2ahZx-KV>6n zCJ3E$G~CpKr9?C>?kk{};GM>tSA?R^9*C?*p?~qe3UKgupk0Vn<uM(SF(9~XnkUXY4)YM%Z&xcsdprDsiL&($$q1!X-&)Yv z+Ro>z8Ec#k#ziuoQLgW+Xk=P z2cdFDZgW8*-8Eo%ce3z-5tkZmvMlkSZ1VtVXO5JwUC%hvx{6Kb7afY^=fnSy@qrRL zuyy`Q$sHiA*vo8b%sCIFff$44r5@e*P$evPXhHfE*DLMqxO}8@&53f-Ig#Qbb5ynb zIZK$9Ojw#Pm2(}6&|hd>whUBkD)ufPk=;3y57mZ-wDWmW%Awl<T9{DfaoA4TU$}y^3H^r?&gK_~GjbVBi2cO_Qb)(^#4FTm5SE}5-!9JT- zXdHqJifd@MNFH`74Y;ng^Y+qppVpa@B1HRp67|iA9F%aK$>TD+L6(Mo5t*YggF%N$ zX#4Bf&`Jr?7GE)C)l|_)#9>S$!{4v0bS@g8fi#I&wlXTSrIo|2UOzSuZU?6gSKL}9 z>TqXT?JYXL9QWf31;??grE2W?D?JBSTUX9CTGfZ+9_wxC|J3#Tigs~>5zPs%Jzcu| zOO5kt{hfpA7i(SZ9@8F<7#HPhb2=S*YO$1LeQcx@D$`-lBtG7Md?9MIt8Y3w{0Pg9 z$B$0CF2zvKO_Zvd+fF~Yl`w1L|B}NOkF6xW|7S*L6o?HNuf6=C;M#m+GO`w!VOjv| zhFhAN330}B-vGutAJ}{W3E%JWo|8Mr6pzjXzHdN7`7JOZ_p2aLJHRU#$)!(IJ_o8c zvGl@;TWqv}JytnMke8Q&bg++aZv-yq=YqFIs-Ga?Tj^o@|YP=Q?ZeO^S*U$hEQjwsqb5H$|-(y z8K-=1==_Qw^P2Se<7+d zVvU$iM!e8#T_=u-jn7o-%mZfoafFsg<742?=mumtEqmWe`X%sB7QN4>n}$i#M`cfh zki>7?yj`L}$-9(Gc!OBV@76uNp<*vIk|^9LiI<8C85yG(cn$y%Ge#6IRhQPQihbDTE zri7>%Xc>;`UaYxl6_Eg{Y{MQ{2_3~RJv|A+yYF_sU(;)tGRcX5EVb-ArNVyHwfRaQ zcT5ci3+#oE;ED2g3dEP%B`*`X+1wU}yhTeo@A*mc6d4r%dtIGON%)hodd~}yV z)OnchC`FEq$;7|pyH;O%Q{zc;MdA?=u)+n*UWkZ@h{kr&Ei8emF=H`iosvzAV#MSL zJCG-Tk(wFEB zaB5yG@71;25LdW2bcPu@EQvp_fzfIc+j~-TS;L@?!Lp{ji6m9p z!W)-1Unb8F4Gv|CeDUT1sn1@ut;tACgY4-fCx(F2r%nO{0L)(8hNf?e4HiMCqu-g2 z@HnvmUE9!CUU=_cUwOBH4^k#@-T_vG#U(HxVbL=lSZIsc5MYVdzWFXvk=@b9h4#`= zb%NDp&nJo50UOOsQ%InX1XANFkHcLD#bAXY8fU>1=(h z5BxcC74KoTAP9qkEVNn;ABpm03$pX#Ho9}zc8khXI0rCNuE(9@IE%;+zzi+Sg`T(4 z!ma0N&wdZUv$Ky^2(4iKddN?v4Uib3;A_MdiQ%3vg(NQn%->jVqypc3nUL)pm~^q5 zgN?}uqCSXRj%A$sE{on*YXNF{ZQ5@m9Yaif#`5OHM!AWHb6&4oFchZg6m%vf^j83d zrwDC6mRKrek0zO~nySamR0qgJP%#APY5a+$kL!N@#M~A15kb0&ADabtH5YNj@xl4OThNWt^a|QX? 
zjT}<7a!&Nc7Flq3j^xXcuaA#QV!wak!0kKVTrxscbQm{c366G{4;CQ~#1@a_rr*9( zk`eV00YWeYTb=`Men*6hlPb03r+$hQOuUlp`mlW~xBEPIOMMm08=Z-Zd;LaZOJCV< z)Ai41OM)iw@QpjWZ3o${j|BOFaUjr^_S3*>spcLGa6#SuYmvwSf@+b$t^vN0ncYj2 zo6qBM8TVUIFPIa#xA*5~d)(6BDURc<3GTRTX}z65l%wxt@8$(w@xJ`0?+4t3Vg>Fr zH$B)CNLyyxbP0TaLNj%bvcDioKh%T60>4?o@^H&sRBy*w8w!QKb`0&xmA2m06kz?m zGe-@qf~u!Al%tCXc&Tk2!rpJ~9r%^sl&RyJyZa}GRv;qf20@E*7^tMRnYVT7gi#e@ zC72dCr{c|8)6WSNRWh(I`{*ob4n2g&m6lTMOl))FanmE+JCEAuXImSby3vmbw<@)% zQ7tvB5S5eHx&jeG1H{RREbOk->=Tl0snN9Gcu}=_<8jF^;Z*(KTBzs^4DW_5UKX#g zN|-H_hBS{pnHE{t?+d^Os|!@O>Js7}*{m@%|IVu(?@W&~ph7d-1rqsfTphXu?bY;~ z!}%OSt1VB2%?`XByZRDmtvtw*B`#bPYpaR5xa(|n(GUYM@0B;jRs-wl=-q*!#o<(z zKi_ab-H-xRVCjhC1nGJAZXQcA>$R!S1#68{B5bH1KXg+|d)}&fPJY%KFE&f1wp;Q< zW?NGGV=#^svT57<5D{C*N!QelSu9k*3i){cJi6st+fgy}erCEO4Yy*LXfK;Dbz%(a zN|-;{*#QT8(i+hiwLSrp9RZ>>c<-E`|eY$@ZS;z7?-%56zh6q1D5DSrWA-NMNn{-|N z>;m+#Sh6D-Scj)RnNkXmWzTCTk@<0do8aWM2i`LF#0DhBr^*Bpjk3M?T0cUI)H>3F zw~7+eT5_w?XL`q7ln=YRWrLMQX88Wz4aXm5H*nKo;!o4z%uAS*dx%dUi7zWjRXOE; zwhgM8^vmGn7J*q>aH8VV5uUB`mE*xxNe0(|mWzNy0mr#+T;^$;ADMsefD5i@@NiYB zMju8*#R72A`P{KC5K+*B8j*d=n>6XsP#lSp>f!v)# zc?Q#=#%E0(SH%o5`PoV{=Am~b%7YnP=wI+>wg~z z8?Tyuq|o`FcgZL6`oCOIftfyo6e%ol438K7d(1+Yj?*&6zZpI!+tys;xWHZYoB#BC zh~O_LH<=EBYDjYO?UUAobgtlE{~k1FU<9WEAv3MD^`+k^vf~+Q1Z1fHG2UXP;{R~d zdN@1kzfaUup3tTkK9>;_zc z;i3hQ-Bo26TeIeU8X`;Run`nQ!My;zCv-=p_iG`Eqa~WWld$082}zdmf7969Y@a06LO_0njSnQ9-O*L zkSGX^1^$h!e%nnY9NEQpba2b+2z-Mqqz|H|U|{t!S=ejHPdt4@0pJU9LL=h1LX+Az z>B>q4YEjO4QQskYJA`prKC-wQ8@CqtkzKv8Sn8d4Bugrkm4zi*dD_6W@ zYJ8TLXx|SHw0paw5csJjE^^+L^;D+v^5HqaTV7b4MjTDC@!0ZPkeInLrjb`q);{5p zm4yioUMp`JWJ;)S$UuMpi?}#K z{EN-<>^`yyyGBCaBjj2MQEv85(qm*4i5v^zVTThBGTB zPi^5{ltA;Rk7@QQGZC~r83AK6N6!Necnm9@okFJ5@n~MuMG7orc@HgE6{8cvMOpjn zbCK+tW|MVUP0A@J=cSN&cWBDGLm#ZGwP9TO`beDL5aQ7Wtdj4_n2cywS3`xa^m4l9 zh}C9IdHE#dO|@KAUaSNmv!ijr@OX`99_^E{Fzprq&dpdmbVRwVapebuhK63Me|or| z%>(Bb^7^FLI3}l&7h2B5q70bM*oo@LyzcJq8Q9@$LNSxaABL32AUcWr+?iHwUgyYr zjJI>k&vA;2j{wQz0gj2U9b=~BN6Rk88`PR_M7hdnE@kxip~VX|{RgKiP~klvnIHHhvsm4FJ^`d`C%QeUwAbo?%~aBWQ~<#(YNO(bP%l9 zhq|>f?gGd1XVV}-G`~|YE}ByhG~aAh!vMd!EGvC@bMY1pMW%A&R8gXUNhAQxj?~T6 zgYL99uuFSQs1xU+7Jx~6y-(ACU!b!-t!d?%*f)B$>f!S;A47qJ5QH(7Z9^yY|)dL=HNAcK_a~T z!7R0P`in0Ng1_Lz=pDy^ynUIC{tr>|($eFrE*ne0m*hRv6stM*R`T(I_>?z_u#ZWL zjobOs3o}^0i!Zh>d)!m_^X-0nE(}qH?YHK9YIZ4xC?-KMw@LjLus+!6D>+jtYmzxG zFOFw(%qVZqL&yMjYA_=hzOqUnbs@rO;gdRsVM$?jEXr|xqS_kjM^)PV`~_DA!m@yT z@PiBuw#M1vekxsmEL(1U6L71J?nx2fC>PXM3Ws~m-%oK^)AUpfKrQe2%{}9F1-c-X zz?sQf+z65@9cEV^uW;v1q)*t&(5J(}QkxX-QJX5cB599BGm#9G%Cnn>tI%jhmRr22^rCIJvY`4(Ki)>rxcD& zdk5|tlQJ-A2PR*<#GOl_H1M9(a|kPLJlVU?R;h?G6eyeIR9>W`c|L8*RD`YEfjJFG z1J&$-Ze=K$OT(V^HrvS6*_lJE6rsNT#XvJZyZeO>h1KodyNm~ZBeb%PKE-ZMyt$gP z&5|l0{j)&9W-Q?$k897MyyLqZ!SLB*9ackNe1o^TqhmBg5;Rm~wwA zp~hT|2WmZ6PP5;;)e$#X>{2cK?11bx5$S%+%7%$_=$f>%?JaAOAVI@52afyiC<)Sy-Q}d?Mh;I`w~fbf_7h&jl*!SM z_RxC-5P~=SqRU$YQ)y#3?ciRnd}EhB?w;tS+TFyAvBW>F+4rrlTv9T)rjomFA__F`(n|34XaDR3ZZFX zrzu&Jt}4dl^_1UvAsd*Wx_H{b!NeXpT~=O+Z!b{0TjS21I%4yfIa zqfZapb9i#)2_ePIfEi;XtsgYt@P^zW`o2F%dsY4Frv1mgKF{lK-(cQIxzP7Tter*9I9dM^J|kXk9{hjf3@{Q#7` z-_od+;~sEYNy*GaU(_D=X#jSR*-wh8$d7lvIXK@~FRLvPKN_R>j^atR%=s}rroJ&9 zu-66M95!-EPLy>*6^x^w6hOGwCIdiyAlWM|05?AE+QnbFnk*8%gGLrtkbBps=Xob; z88OTGSJC?HvppzU2eS4VvLTqtLfJZuX`z>XNe=30Sv{R!Ff$INjOdIwPr8W|Zt_@0 zRw_Gj=IRBENvg;a))a~pdvX;)5mR*+bK%87LE=#@nTIRvmI0emr;}pov!^D(>FSO6 z&9&96igJiKhS^q+cI|NGdtUam<640-ZgJxwba#-?tiy9qRN6VI_e{PZ$ui}GlN3 zsZ2YY`em5YCp&v>&5`1b4WkjKH{ZFU*0A>&7%K=E+9T* zfSWrSTmJ^@{1xWp@_@Q1J7f00Vh9j55l~p7L>&GJiFMdPV78ni^sk1^NuZp|%$E4@ z@9>W^a2do#57a{K{%HuJzyH)TqGGA58vkktVlqPJ_I9h`zY{XY!Z!@?p}Xe)8WK3V 
zlQ#;936%aFTq+O^(Sl@NEcw7c4MA+FLl9Qtmi!wY@f-IF*aT*P3IDH#AmRh6k1<{O j{~Ea`= Date: Mon, 31 Oct 2016 01:40:02 -0700 Subject: [PATCH 057/180] Add default cuda system path (#192) * DYLD_LIBRARY_PATH is disable after Mac OS X 10.11 * fix clang + gpu compile error on Mac OS * fix some words and errors in build docs --- cmake/flags.cmake | 51 ++++++++++++++++---- doc/build/build_from_source.md | 16 +++---- paddle/cuda/src/hl_dso_loader.cc | 81 +++++++++++++++++++++++--------- 3 files changed, 108 insertions(+), 40 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index dbad6be3f41b3..e087770991aef 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -21,12 +21,6 @@ function(safe_set_flag is_c src_list flag_name) endif() if(${safe_name}) set(${src_list} "${${src_list}} ${flag_name}" PARENT_SCOPE) - if(is_c) - set(CUDA_NVCC_FLAGS - --compiler-options;${flag_name} - ${CUDA_NVCC_FLAGS} - PARENT_SCOPE) - endif() endif() endfunction() @@ -40,6 +34,20 @@ macro(safe_set_cxxflag src_list flag_name) safe_set_flag(OFF ${src_list} ${flag_name}) endmacro() +# helper macro to set nvcc flag +macro(safe_set_nvflag flag_name) + string(REPLACE "-" "_" safe_name ${flag_name}) + string(REPLACE "=" "_" safe_name ${safe_name}) + CHECK_C_COMPILER_FLAG(${flag_name} C_COMPILER_SUPPORT_FLAG_${safe_name}) + set(safe_name C_COMPILER_SUPPORT_FLAG_${safe_name}) + if(${safe_name}) + set(CUDA_NVCC_FLAGS + --compiler-options;${flag_name} + ${CUDA_NVCC_FLAGS}) + endif() +endmacro() + + CHECK_CXX_SYMBOL_EXISTS(UINT64_MAX "stdint.h" UINT64_MAX_EXISTS) if(NOT UINT64_MAX_EXISTS) set(CMAKE_REQUIRED_DEFINITIONS -D__STDC_LIMIT_MACROS) @@ -63,20 +71,43 @@ set(COMMON_FLAGS -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wno-unused-parameter + -Wno-unused-function + -Wno-error=literal-suffix + -Wno-error=unused-local-typedefs) + +set(GPU_COMMON_FLAGS + -fPIC + -fno-omit-frame-pointer + -Wnon-virtual-dtor + -Wdelete-non-virtual-dtor + -Wno-unused-parameter + -Wno-unused-function -Wno-error=literal-suffix -Wno-error=unused-local-typedefs -Wno-error=unused-function # Warnings in Numpy Header. ) +if (APPLE) + # On Mac OS X build fat binaries with x86_64 architectures by default. + set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE) +else() + set(GPU_COMMON_FLAGS + -Wall + -Wextra + -Werror + ${GPU_COMMON_FLAGS}) +endif() + + foreach(flag ${COMMON_FLAGS}) safe_set_cflag(CMAKE_C_FLAGS ${flag}) safe_set_cxxflag(CMAKE_CXX_FLAGS ${flag}) endforeach() -# On Mac OS X build fat binaries with x86_64 architectures by default. -if (APPLE) - set (CMAKE_OSX_ARCHITECTURES "x86_64" CACHE STRING "Build architectures for OSX" FORCE) -endif () +foreach(flag ${GPU_COMMON_FLAGS}) + safe_set_nvflag(${flag}) +endforeach() + # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc. # So, don't set these flags here. diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md index f7db0a9b92e67..7727c8c3788b9 100644 --- a/doc/build/build_from_source.md +++ b/doc/build/build_from_source.md @@ -153,12 +153,12 @@ As a simple example, consider the following: - **Only CPU** ```bash - cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF + cmake .. -DWITH_GPU=OFF ``` - **GPU** ```bash - cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF + cmake .. -DWITH_GPU=ON ``` - **GPU with doc and swig** @@ -171,7 +171,7 @@ Finally, you can build PaddlePaddle: ```bash # you can add build option here, such as: -cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX= +cmake .. 
-DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= # please use sudo make install, if you want to install PaddlePaddle into the system make -j `nproc` && make install # set PaddlePaddle installation path in ~/.bashrc @@ -246,7 +246,7 @@ easy_install pip ```bash sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local - sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* + sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib/libcudnn* ``` 2. Then you need to set DYLD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc. @@ -273,12 +273,12 @@ As a simple example, consider the following: - **Only CPU** ```bash - cmake .. -DWITH_GPU=OFF -DWITH_DOC=OFF + cmake .. -DWITH_GPU=OFF ``` - **GPU** ```bash - cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF + cmake .. -DWITH_GPU=ON ``` - **GPU with doc and swig** @@ -291,9 +291,9 @@ Finally, you can build PaddlePaddle: ```bash # you can add build option here, such as: -cmake .. -DWITH_GPU=ON -DWITH_DOC=OFF -DCMAKE_INSTALL_PREFIX= +cmake .. -DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= # please use sudo make install, if you want to install PaddlePaddle into the system -make -j `nproc` && make install +make -j `sysctl -n hw.ncpu` && make install # set PaddlePaddle installation path in ~/.bashrc export PATH=/bin:$PATH ``` diff --git a/paddle/cuda/src/hl_dso_loader.cc b/paddle/cuda/src/hl_dso_loader.cc index eee9984e07326..91c60d85a1e41 100644 --- a/paddle/cuda/src/hl_dso_loader.cc +++ b/paddle/cuda/src/hl_dso_loader.cc @@ -46,63 +46,100 @@ static inline std::string join(const std::string& part1, const std::string& part return ret; } -static inline void GetDsoHandleWithSearchPath( +static inline void GetDsoHandleFromDefaultPath( + std::string& dso_path, void** dso_handle, int dynload_flags) { + LOG(INFO) << "Try to find cuda library: " << dso_path + << "from default system path."; + // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH + *dso_handle = dlopen(dso_path.c_str(), dynload_flags); + + // DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to + // bring System Integrity Projection (SIP), if dso_handle + // is null, search from default package path in Mac OS. 
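+  // For example, when dlopen("libcudart.dylib") comes back null because
+  // DYLD_LIBRARY_PATH is ignored under SIP, the block below retries with
+  // the absolute path "/usr/local/cuda/lib/libcudart.dylib" built by join().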
+  #if defined(__APPLE__) or defined(__OSX__)
+  if (nullptr == *dso_handle) {
+    dso_path = join("/usr/local/cuda/lib/", dso_path);
+    *dso_handle = dlopen(dso_path.c_str(), dynload_flags);
+    if (nullptr == *dso_handle) {
+      if (dso_path == "libcudnn.dylib") {
+        LOG(FATAL) << "Note: [Recommend] copy cudnn into /usr/local/cuda/ \n"
+                   << "For instance, sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C "
+                   << "/usr/local \n sudo chmod a+r /usr/local/cuda/include/cudnn.h "
+                   << "/usr/local/cuda/lib/libcudnn*";
+      }
+    }
+  }
+  #endif
+}
+
+static inline void GetDsoHandleFromSearchPath(
     const std::string& search_root,
-    const std::string& dso_path,
+    const std::string& dso_name,
     void** dso_handle) {
   int dynload_flags = RTLD_LAZY | RTLD_LOCAL;
   *dso_handle = nullptr;
 
-  std::string dlPath = dso_path;
+  std::string dlPath = dso_name;
   if (search_root.empty()) {
-    // default search xxx.so from LD_LIBRARY_PATH
-    *dso_handle = dlopen(dlPath.c_str(), dynload_flags);
+    GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags);
   } else {
     // search xxx.so from custom path
-    dlPath = join(search_root, dso_path);
+    dlPath = join(search_root, dso_name);
     *dso_handle = dlopen(dlPath.c_str(), dynload_flags);
-    // then, search xxx.so from LD_LIBRARY_PATH
-    if (nullptr == *dso_handle) {
-      *dso_handle = dlopen(dso_path.c_str(), dynload_flags);
+    // if not found, search from default path
+    if (nullptr == *dso_handle) {
+      LOG(WARNING) << "Failed to find cuda library: " << dlPath;
+      dlPath = dso_name;
+      GetDsoHandleFromDefaultPath(dlPath, dso_handle, dynload_flags);
     }
   }
 
   CHECK(nullptr != *dso_handle)
-    << "For Gpu version of PaddlePaddle, it couldn't find CUDA library: "
-    << dlPath.c_str() << ". Please make sure you already specify its path. "
-    << "Note: for training data on Cpu using Gpu version of PaddlePaddle, "
-    << "you must specify libcudart via export LD_LIBRARY_PATH for Linux or "
-    << "export DYLD_LIBRARY_PATH for MAC OS.";
+    << "Failed to find cuda library: " << dlPath << std::endl
+    << "Please specify its path correctly using one of the following ideas: \n"
+
+    << "Idea 1. set cuda and cudnn lib path at runtime. "
+    << "http://www.paddlepaddle.org/doc/ui/cmd_argument/argument_outline.html \n"
+    << "For instance, issue command: paddle train --use_gpu=1 "
+    << "--cuda_dir=/usr/local/cudnn/lib --cudnn_dir=/usr/local/cudnn/lib ...\n"
+
+    << "Idea 2. set environment variable LD_LIBRARY_PATH on Linux or "
+    << "DYLD_LIBRARY_PATH on Mac OS. \n"
+    << "For instance, issue command: export LD_LIBRARY_PATH=... \n"
+
+    << "Note: After Mac OS 10.11, using the DYLD_LIBRARY_PATH is impossible "
+    << "unless System Integrity Protection (SIP) is disabled. 
However, @Idea 1" + << "always work well."; } void GetCublasDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleWithSearchPath(FLAGS_cuda_dir, "libcublas.dylib", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.dylib", dso_handle); #else - GetDsoHandleWithSearchPath(FLAGS_cuda_dir, "libcublas.so", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcublas.so", dso_handle); #endif } void GetCudnnDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleWithSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.dylib", dso_handle); #else - GetDsoHandleWithSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cudnn_dir, "libcudnn.so", dso_handle); #endif } void GetCudartDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleWithSearchPath("", "libcudart.dylib", dso_handle); + GetDsoHandleFromSearchPath("", "libcudart.dylib", dso_handle); #else - GetDsoHandleWithSearchPath("", "libcudart.so", dso_handle); + GetDsoHandleFromSearchPath("", "libcudart.so", dso_handle); #endif } void GetCurandDsoHandle(void** dso_handle) { #if defined(__APPLE__) || defined(__OSX__) - GetDsoHandleWithSearchPath(FLAGS_cuda_dir, "libcurand.dylib", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.dylib", dso_handle); #else - GetDsoHandleWithSearchPath(FLAGS_cuda_dir, "libcurand.so", dso_handle); + GetDsoHandleFromSearchPath(FLAGS_cuda_dir, "libcurand.so", dso_handle); #endif } From 12945b2c9017c88d2581253a6df83d9e35e2b804 Mon Sep 17 00:00:00 2001 From: backyes Date: Mon, 31 Oct 2016 23:42:59 +0800 Subject: [PATCH 058/180] Add glog header path to include (#295) --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index e96ce28248ee5..527064e31000a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -135,6 +135,7 @@ endif() if(WITH_GLOG) add_definitions(-DPADDLE_USE_GLOG) + include_directories(${LIBGLOG_INCLUDE_DIR}) endif() if(WITH_GFLAGS) From cdac60f616f4950aecd69a5aa326b196aa05e135 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 1 Nov 2016 06:40:26 +0000 Subject: [PATCH 059/180] add SpatialPyramidPoolLayer c++ support --- paddle/cuda/include/hl_cnn.h | 14 +- paddle/cuda/include/stub/hl_cnn_stub.h | 10 +- paddle/cuda/src/hl_cuda_cnn.cu | 40 +-- paddle/gserver/layers/PoolProjection.cpp | 81 +++++ paddle/gserver/layers/PoolProjection.h | 72 +++++ paddle/gserver/layers/Projection.h | 13 +- .../layers/SpatialPyramidPoolLayer.cpp | 128 ++++++++ .../gserver/layers/SpatialPyramidPoolLayer.h | 54 ++++ paddle/gserver/tests/test_LayerGrad.cpp | 32 +- paddle/math/Matrix.cpp | 288 +++++++++--------- proto/ModelConfig.proto.m4 | 12 + 11 files changed, 562 insertions(+), 182 deletions(-) create mode 100644 paddle/gserver/layers/PoolProjection.cpp create mode 100644 paddle/gserver/layers/PoolProjection.h create mode 100644 paddle/gserver/layers/SpatialPyramidPoolLayer.cpp create mode 100644 paddle/gserver/layers/SpatialPyramidPoolLayer.h diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index d19f4a4bb310a..4bd9d5e7c9e90 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -91,6 +91,7 @@ extern void hl_expand_feature2col( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] tgtData output data. + * @param[in] tgtStride output data stride. 
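+ * @note   tgtStride is measured in elements; it can be larger than
+ *         pooledH * pooledW * channels when the pooled result is written
+ *         into a sub-block of a wider output matrix.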
* */ extern void hl_maxpool_forward( @@ -100,7 +101,8 @@ extern void hl_maxpool_forward( const int pooledH, const int pooledW, const int sizeX, const int sizeY, const int strideH, const int strideW, - const int paddingH, const int paddingW, real* tgtData); + const int paddingH, const int paddingW, + real* tgtData, const int tgtStride); /** * @brief Maximum pool backward. @@ -123,6 +125,7 @@ extern void hl_maxpool_forward( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] targetGrad output grad. + * @param[in] outStride output grad data stride. * */ extern void hl_maxpool_backward( @@ -135,7 +138,7 @@ extern void hl_maxpool_backward( const int strideH, const int strideW, const int paddingH, const int paddingW, real scaleA, real scaleB, - real* targetGrad); + real* targetGrad, const int outStride); /** * @brief Averge pool forward. @@ -154,6 +157,7 @@ extern void hl_maxpool_backward( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] tgtData output data. + * @param[in] tgtStride output data stride. * */ extern void hl_avgpool_forward( @@ -163,7 +167,8 @@ extern void hl_avgpool_forward( const int pooledH, const int pooledW, const int sizeX, const int sizeY, const int strideH, const int strideW, - const int paddingH, const int paddingW, real* tgtData); + const int paddingH, const int paddingW, + real* tgtData, const int tgtStride); /** * @brief Maximum pool backward. @@ -184,6 +189,7 @@ extern void hl_avgpool_forward( * @param[in] scaleA scale. * @param[in] scaleB scale. * @param[out] backGrad output grad. + * @param[in] outStride output grad data stride. * */ extern void hl_avgpool_backward( @@ -195,7 +201,7 @@ extern void hl_avgpool_backward( const int strideH, const int strideW, int paddingH, int paddingW, real scaleA, real scaleB, - real* backGrad); + real* backGrad, const int outStride); /** * @brief Cross-map-respose normalize forward. 
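The new tgtStride arguments above are what let a pooling kernel write one sample's result into a sub-block of a wider matrix instead of into a dense buffer: the SpatialPyramidPoolLayer added later in this patch hands each pyramid level a subColMatrix of its output, so the level's data pointer starts at some column offset while the row stride stays the full output width per sample. A minimal host-side sketch of that addressing; the sizes are illustrative and not taken from the patch:

    // Mirrors the tgtIndex computation in KeMaxPoolForward / KeAvgPoolForward:
    //   tgtIndex = index % (pooledW * pooledH * channels) + frameNum * tgtStride
    #include <vector>

    int main() {
      const int batchSize = 2;
      const int levelSize = 4;    // pooledH * pooledW * channels of one level (assumed)
      const int startCol  = 6;    // column where this level begins (assumed)
      const int rowWidth  = 16;   // total SPP output size per sample (assumed)
      std::vector<float> sppOut(batchSize * rowWidth, 0.0f);

      float* tgtData = sppOut.data() + startCol;  // like output->subColMatrix()
      const int tgtStride = rowWidth;             // stride of the full matrix

      for (int frameNum = 0; frameNum < batchSize; ++frameNum) {
        for (int index = 0; index < levelSize; ++index) {
          tgtData[index % levelSize + frameNum * tgtStride] = 1.0f;  // pooled value
        }
      }
      return 0;
    }

Without the stride argument the kernels assumed a dense output whose stride equals pooledH * pooledW * channels, which is why GpuMatrix::maxPoolForward and avgPoolForward later in this patch pass getStride() through to the kernels.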
diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index 5f696986e3c8f..4342c30376eeb 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -44,7 +44,8 @@ inline void hl_maxpool_forward( const int pooledH, const int pooledW, const int sizeX, const int sizeY, const int strideH, const int strideW, - const int paddingH, const int paddingW, real* tgtData) {} + const int paddingH, const int paddingW, + real* tgtData, const int tgtStride) {} inline void hl_maxpool_backward( const int frameCnt, const real* inputData, @@ -56,7 +57,7 @@ inline void hl_maxpool_backward( const int strideH, const int strideW, const int paddingH, const int paddingW, real scaleA, real scaleB, - real* targetGrad) {} + real* targetGrad, const int outStride) {} inline void hl_avgpool_forward( const int frameCnt, const real* inputData, @@ -65,7 +66,8 @@ inline void hl_avgpool_forward( const int pooledH, const int pooledW, const int sizeX, const int sizeY, const int strideH, const int strideW, - const int paddingH, const int paddingW, real* tgtData) {} + const int paddingH, const int paddingW, + real* tgtData, const int tgtStride) {} inline void hl_avgpool_backward( const int frameCnt, const real* outGrad, @@ -76,7 +78,7 @@ inline void hl_avgpool_backward( const int strideH, const int strideW, int paddingH, int paddingW, real scaleA, real scaleB, - real* backGrad) {} + real* backGrad, const int outStride) {} inline void hl_CMRNorm_forward( size_t frameCnt, const real* in, real* scale, real* out, diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index baa2fb0d27d74..fcef6a4436b5c 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -152,7 +152,7 @@ __global__ void KeMaxPoolForward(const int nthreads, const real* inputData, const int ksizeW, const int ksizeH, const int strideH, const int strideW, const int offsetH, const int offsetW, - real* tgtData) { + real* tgtData, const int tgtStride) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % pooledW; @@ -173,7 +173,9 @@ __global__ void KeMaxPoolForward(const int nthreads, const real* inputData, maxval = inputData[h * width + w]; } } - tgtData[index] = maxval; + int tgtIndex = index % (pooledW * pooledH * channels) + + frameNum * tgtStride; + tgtData[tgtIndex] = maxval; } } @@ -184,7 +186,7 @@ void hl_maxpool_forward(const int frameCnt, const real* inputData, const int sizeX, const int sizeY, const int strideH, const int strideW, const int paddingH, const int paddingW, - real* tgtData) { + real* tgtData, const int tgtStride) { int num_kernels = pooledH * pooledW * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; @@ -194,7 +196,7 @@ void hl_maxpool_forward(const int frameCnt, const real* inputData, KeMaxPoolForward<<< grid, threads, 0, STREAM_DEFAULT >>> (num_kernels, inputData, channels, height, width, pooledH, pooledW, sizeX, sizeY, strideH, strideW, - paddingH, paddingW, tgtData); + paddingH, paddingW, tgtData, tgtStride); CHECK_SYNC("hl_maxpool_forward failed"); } @@ -207,7 +209,7 @@ __global__ void KeMaxPoolBackward(const int nthreads, const real* inputData, const int strideH, const int strideW, const int padH, const int padW, real scaleA, real scaleB, - real* targetGrad) { + real* targetGrad, const int outStride) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { // find out the local index @@ -223,8 +225,8 @@ __global__ void 
KeMaxPoolBackward(const int nthreads, const real* inputData, int pwend = offsetW >= 0 ? min(offsetW / strideW + 1, pooledW) : 0; real gradient = 0; real input = inputData[index]; - outData += (frameNum * channels + offsetC) * pooledH * pooledW; - outGrad += (frameNum * channels + offsetC) * pooledH * pooledW; + outData += (frameNum * outStride + offsetC * pooledH * pooledW); + outGrad += (frameNum * outStride + offsetC * pooledH * pooledW); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (input == outData[ph * pooledW + pw]) { @@ -246,7 +248,7 @@ void hl_maxpool_backward(const int frameCnt, const real* inputData, const int strideH, const int strideW, const int paddingH, const int paddingW, real scaleA, real scaleB, - real* targetGrad) { + real* targetGrad, const int outStride) { int num_kernels = height * width * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; @@ -257,7 +259,7 @@ void hl_maxpool_backward(const int frameCnt, const real* inputData, strideH, strideW, paddingH, paddingW, scaleA, scaleB, - targetGrad); + targetGrad, outStride); CHECK_SYNC("hl_maxpool_backward"); } @@ -268,7 +270,7 @@ __global__ void KeAvgPoolForward(const int nthreads, const real* inputData, const int sizeX, const int sizeY, const int strideH, const int strideW, const int padH, const int padW, - real* tgtData) { + real* tgtData, const int tgtStride) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int pw = index % pooledW; @@ -293,7 +295,9 @@ __global__ void KeAvgPoolForward(const int nthreads, const real* inputData, aveval += inputData[h * width + w]; } } - tgtData[index] = aveval / pool_size; + int tgtIndex = index % (pooledW * pooledH * channels) + + frameNum * tgtStride; + tgtData[tgtIndex] = aveval / pool_size; } } @@ -303,14 +307,15 @@ void hl_avgpool_forward(const int frameCnt, const real* inputData, const int pooledH, const int pooledW, const int sizeX, const int sizeY, const int strideH, const int strideW, - const int paddingH, const int paddingW, real* tgtData) { + const int paddingH, const int paddingW, + real* tgtData, const int tgtStride) { int num_kernels = pooledH * pooledW * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; KeAvgPoolForward<<< blocks, 1024, 0, STREAM_DEFAULT >>> (num_kernels, inputData, channels, height, width, pooledH, pooledW, sizeX, sizeY, strideH, strideW, - paddingH, paddingW, tgtData); + paddingH, paddingW, tgtData, tgtStride); CHECK_SYNC("hl_avgpool_forward failed"); } @@ -322,7 +327,7 @@ __global__ void KeAvgPoolBackward(const int nthreads, const real* outGrad, const int strideH, const int strideW, const int padH, const int padW, real scaleA, real scaleB, - real* tgtGrad) { + real* tgtGrad, const int outStride) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < nthreads) { int offsetW = index % width + padW; @@ -335,7 +340,8 @@ __global__ void KeAvgPoolBackward(const int nthreads, const real* outGrad, int phend = offsetH >= 0 ? min(offsetH / strideH + 1, pooledH) : 0; int pwend = offsetW >= 0 ? 
min(offsetW / strideW + 1, pooledW) : 0; real gradient = 0; - outGrad += (frameNum * channels + offsetC) * pooledH * pooledW; + outGrad += (frameNum * outStride + offsetC * pooledH * pooledW); + for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { @@ -360,7 +366,7 @@ void hl_avgpool_backward(const int frameCnt, const real* outGrad, const int strideH, const int strideW, const int paddingH, const int paddingW, real scaleA, real scaleB, - real* backGrad) { + real* backGrad, const int outStride) { int num_kernels = height * width * channels * frameCnt; int blocks = (num_kernels + 1024 - 1) / 1024; @@ -370,7 +376,7 @@ void hl_avgpool_backward(const int frameCnt, const real* outGrad, strideH, strideW, paddingH, paddingW, scaleA, scaleB, - backGrad); + backGrad, outStride); CHECK_SYNC("hl_avgpool_backward failed"); } diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp new file mode 100644 index 0000000000000..50059ee04d39b --- /dev/null +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "PoolProjection.h" + +namespace paddle { + +REGISTER_PROJECTION_CREATE_FUNC(pool2, &PoolProjection::create); + +PoolProjection* PoolProjection::create(const ProjectionConfig& config, + ParameterPtr parameter, bool useGpu) { + const std::string& pool = config.pool_conf().pool_type(); + if (pool == "max") { + return new MaxPoolProjection(config, parameter, useGpu); + } else if (pool == "avg") { + return new AvgPoolProjection(config, parameter, useGpu); + } else { + LOG(FATAL) << "Unknown pool type: " << pool; + return nullptr; + } +} + +void MaxPoolProjection::forward() { + MatrixPtr inputV = in_->value; + MatrixPtr outV = out_->value; + outV->maxPoolForward(*inputV, imgSizeY_, imgSize_, channels_, + sizeX_, sizeY_, strideY_, stride_, + outputY_, outputX_, confPaddingY_, confPadding_); +} + +void MaxPoolProjection::backward(const UpdateCallback& callback) { + (void)callback; + MatrixPtr outGrad = out_->grad; + MatrixPtr inputV = in_->value; + MatrixPtr outV = out_->value; + MatrixPtr inputGrad = in_->grad; + + if (NULL == inputGrad) { + return; + } + inputGrad->maxPoolBackward(*inputV, imgSizeY_, imgSize_, *outGrad, *outV, + sizeX_, sizeY_, + strideY_, stride_, outputY_, outputX_, 1, 1, + confPaddingY_, confPadding_); +} + +void AvgPoolProjection::forward() { + MatrixPtr inputV = in_->value; + MatrixPtr outV = out_->value; + outV->avgPoolForward(*inputV, imgSizeY_, imgSize_, channels_, + sizeX_, sizeY_, strideY_, stride_, + outputY_, outputX_, confPaddingY_, confPadding_); +} + +void AvgPoolProjection::backward(const UpdateCallback& callback) { + (void)callback; + + MatrixPtr outputGrad = out_->grad; + MatrixPtr inputGrad = in_->grad; + + if (NULL == inputGrad) { + return; + } + + inputGrad->avgPoolBackward(*outputGrad, imgSizeY_, imgSize_, + sizeX_, sizeY_, strideY_, stride_, + outputY_, outputX_, 1, 1, + confPaddingY_, 
confPadding_); +} +} // namespace paddle diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h new file mode 100644 index 0000000000000..73d8a41aefabe --- /dev/null +++ b/paddle/gserver/layers/PoolProjection.h @@ -0,0 +1,72 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "Projection.h" + +namespace paddle { + +class PoolProjection : public Projection { +protected: + size_t imgSizeY_, imgSize_; + size_t outputY_, outputX_; + size_t strideY_, stride_; + size_t sizeY_, sizeX_; + int confPaddingY_, confPadding_; + size_t channels_; + std::string poolType_; + +public: + PoolProjection(const ProjectionConfig& config, ParameterPtr parameter, + bool useGpu) + : Projection(config, parameter, useGpu) { + const PoolConfig& conf = config_.pool_conf(); + poolType_ = conf.pool_type(); + channels_ = conf.channels(); + sizeX_ = conf.size_x(); + stride_ = conf.stride(); + outputX_ = conf.output_x(); + imgSize_ = conf.img_size(); + confPadding_ = conf.padding(); + + sizeY_ = conf.has_size_y() ? conf.size_y() : conf.size_x(); + imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride(); + confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); + outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); + } + static PoolProjection* create(const ProjectionConfig& config, + ParameterPtr parameter, bool useGpu); + const std::string& getPoolType() const { return poolType_; } +}; + +class MaxPoolProjection : public PoolProjection { +public: + MaxPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, + bool useGpu) + : PoolProjection(config, parameter, useGpu) {} + virtual void forward(); + virtual void backward(const UpdateCallback& callback = nullptr); +}; + +class AvgPoolProjection : public PoolProjection { +public: + AvgPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, + bool useGpu) + : PoolProjection(config, parameter, useGpu) {} + virtual void forward(); + virtual void backward(const UpdateCallback& callback = nullptr); +}; +} // namespace paddle diff --git a/paddle/gserver/layers/Projection.h b/paddle/gserver/layers/Projection.h index 3fa3a0cc230ac..203edc5396a53 100644 --- a/paddle/gserver/layers/Projection.h +++ b/paddle/gserver/layers/Projection.h @@ -12,12 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #pragma once -#include "paddle/parameter/Parameter.h" -#include "ModelConfig.pb.h" #include "Layer.h" +#include "ModelConfig.pb.h" +#include "paddle/parameter/Parameter.h" namespace paddle { @@ -28,6 +27,11 @@ namespace paddle { Projection::registrar_.registerClass<__class_name>(#__type_name); \ }) +#define REGISTER_PROJECTION_CREATE_FUNC(__type_name, createFunction) \ + static InitFunction __reg_type_##__type_name([]() { \ + Projection::registrar_.registerClass(#__type_name, createFunction); \ + }) + /** * A projection takes one Argument as input, calculate the result and add it * to output Argument. @@ -50,7 +54,8 @@ class Projection { registrar_; /** - * Forward propagation. If backward() will be called, in and out must be kept valid until then. + * Forward propagation. If backward() will be called, in and out must be kept + * valid until then. * @param in input of projection * @param out output of projection * @param passType PASS_TRAIN of PASS_TEST diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp new file mode 100644 index 0000000000000..bcdba5c151175 --- /dev/null +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp @@ -0,0 +1,128 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#include "SpatialPyramidPoolLayer.h"
+
+namespace paddle {
+
+REGISTER_LAYER(spp, SpatialPyramidPoolLayer);
+
+ProjectionConfig SpatialPyramidPoolLayer::getConfig(size_t imgSizeW,
+                                                    size_t imgSizeH,
+                                                    size_t channels,
+                                                    size_t pyramidLevel,
+                                                    std::string& poolType) {
+  ProjectionConfig config;
+  config.set_type("pool2");
+  PoolConfig* conf = config.mutable_pool_conf();
+  conf->set_channels(channels);
+  conf->set_img_size(imgSizeW);
+  conf->set_img_size_y(imgSizeH);
+  conf->set_pool_type(poolType);
+
+  int numBins = std::pow(2, pyramidLevel);
+
+  int sizeH = std::ceil(imgSizeH / static_cast<double>(numBins));
+  int remainderH = sizeH * numBins - imgSizeH;
+  int paddingH = (remainderH + 1) / 2;
+  int outSizeH = outputSize(imgSizeH, sizeH, paddingH, sizeH);
+
+  int sizeW = std::ceil(imgSizeW / static_cast<double>(numBins));
+  int remainderW = sizeW * numBins - imgSizeW;
+  int paddingW = (remainderW + 1) / 2;
+  int outSizeW = outputSize(imgSizeW, sizeW, paddingW, sizeW);
+
+  conf->set_stride(sizeW);
+  conf->set_stride_y(sizeH);
+  conf->set_size_x(sizeW);
+  conf->set_size_y(sizeH);
+  conf->set_padding(paddingW);
+  conf->set_padding_y(paddingH);
+  conf->set_output_x(outSizeW);
+  conf->set_output_y(outSizeH);
+  config.set_output_size(outSizeH * outSizeW * channels);
+  return config;
+}
+
+void SpatialPyramidPoolLayer::splitInput(Argument& input, size_t height,
+                                         size_t width, bool useGpu) {
+  input.value = getInput(0).value;
+  if (passType_ != PASS_TEST && needGradient()) {
+    Matrix::resizeOrCreate(input.grad, height, width, /* trans */ false,
+                           useGpu);
+    input.grad->zeroMem();
+  }
+}
+
+bool SpatialPyramidPoolLayer::init(const LayerMap& layerMap,
+                                   const ParameterMap& parameterMap) {
+  Layer::init(layerMap, parameterMap);
+  CHECK_EQ(config_.inputs_size(), 1);
+
+  const SppConfig& sppConf = config_.inputs(0).spp_conf();
+  pyramidHeight_ = sppConf.pyramid_height();
+  poolType_ = sppConf.pool_type();
+
+  channels_ = sppConf.channels();
+  imgSizeW_ = sppConf.img_size();
+  imgSizeH_ = sppConf.has_img_size_y() ? 
sppConf.img_size_y() : imgSizeW_; + poolProjections_.reserve(pyramidHeight_); + projCol_.reserve(pyramidHeight_); + projInput_.reserve(pyramidHeight_); + projOutput_.resize(pyramidHeight_); + + size_t startCol = 0; + size_t endCol = 0; + for (size_t i = 0; i < pyramidHeight_; i++) { + poolProjections_.emplace_back(PoolProjection::create( + getConfig(imgSizeW_, imgSizeH_, channels_, i, poolType_), + nullptr, useGpu_)); + endCol += poolProjections_[i]->getOutputSize(); + projCol_.push_back(std::make_pair(startCol, endCol)); + startCol = endCol; + projInput_.emplace_back(Argument()); + } + outputSize_ = endCol; + return true; +} + +void SpatialPyramidPoolLayer::forward(PassType passType) { + Layer::forward(passType); + + int batchSize = getInput(0).getBatchSize(); + resetOutput(batchSize, outputSize_); + for (size_t i = 0; i < pyramidHeight_; i++) { + size_t startCol = projCol_[i].first; + size_t endCol = projCol_[i].second; + projOutput_[i].value = output_.value->subColMatrix(startCol, endCol); + projOutput_[i].grad = output_.grad->subColMatrix(startCol, endCol); + splitInput(projInput_[i], getInput(0).value->getHeight(), + getInput(0).value->getWidth(), useGpu_); + } + for (size_t i = 0; i < pyramidHeight_; i++) { + poolProjections_[i]->forward(&projInput_[i], &projOutput_[i], passType); + } +} + +void SpatialPyramidPoolLayer::backward(const UpdateCallback& callback) { + for (size_t i = 0; i < pyramidHeight_; i++) { + if (poolProjections_[i]) { + poolProjections_[i]->backward(callback); + getInput(0).grad->add(*projInput_[i].grad); + } + } +} + +} // namespace paddle + diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/gserver/layers/SpatialPyramidPoolLayer.h new file mode 100644 index 0000000000000..de1fd4da07dd8 --- /dev/null +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.h @@ -0,0 +1,54 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+
+#pragma once
+
+#include "Layer.h"
+#include "PoolProjection.h"
+#include "paddle/utils/Logging.h"
+
+namespace paddle {
+
+class SpatialPyramidPoolLayer : public Layer {
+protected:
+  size_t channels_;
+  size_t imgSizeW_;
+  size_t imgSizeH_;
+  size_t pyramidHeight_;
+  size_t outputSize_;
+  std::string poolType_;
+
+  std::vector<std::unique_ptr<PoolProjection>> poolProjections_;
+  std::vector<Argument> projInput_;
+  std::vector<Argument> projOutput_;
+  std::vector<std::pair<size_t, size_t>> projCol_;
+
+public:
+  explicit SpatialPyramidPoolLayer(const LayerConfig& config) : Layer(config) {}
+  ~SpatialPyramidPoolLayer() {}
+
+  virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
+  ProjectionConfig getConfig(size_t sizeX_, size_t sizeY_, size_t channels,
+                             size_t pyramidLevel_, std::string& poolType_);
+
+  int outputSize(int imageSize, int windowSize, int padding, int stride) {
+    return (imageSize - windowSize + 2 * padding) / stride + 1;
+  }
+
+  virtual void forward(PassType passType);
+  virtual void backward(const UpdateCallback& callback = nullptr);
+  void splitInput(Argument& input, size_t height, size_t width, bool useGpu);
+};
+}  // namespace paddle
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp
index eab9bf84141a2..3d633f4b72797 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/gserver/tests/test_LayerGrad.cpp
@@ -13,14 +13,14 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <gtest/gtest.h>
-#include <vector>
 #include <string>
-#include "paddle/gserver/layers/DataLayer.h"
+#include <vector>
 #include "ModelConfig.pb.h"
+#include "paddle/gserver/layers/DataLayer.h"
 #include "paddle/trainer/Trainer.h"

-#include "TestUtil.h"
 #include "LayerGradUtil.h"
+#include "TestUtil.h"

 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
@@ -880,6 +880,32 @@ TEST(Layer, PoolLayer) {
 #endif
 }

+void testSppLayer(const string& poolType, const int pyramidHeight, bool trans,
+                  bool useGpu) {
+  TestConfig config;
+  config.layerConfig.set_type("spp");
+  config.inputDefs.push_back({INPUT_DATA, "layer_0", 3200, 0});
+  LayerInputConfig* input = config.layerConfig.add_inputs();
+  SppConfig* sppConfig = input->mutable_spp_conf();
+  sppConfig->set_pool_type(poolType);
+  sppConfig->set_pyramid_height(pyramidHeight);
+  sppConfig->set_channels(16);
+  sppConfig->set_img_size(10);
+  sppConfig->set_img_size_y(20);
+  testLayerGrad(config, "spp", 100, trans, useGpu);
+}
+
+TEST(Layer, SpatialPyramidPoolLayer) {
+  for (auto useGpu : {false, true}) {
+    testSppLayer("avg", 1, false, useGpu);
+    testSppLayer("avg", 3, false, useGpu);
+    testSppLayer("avg", 5, false, useGpu);
+    testSppLayer("max", 1, false, useGpu);
+    testSppLayer("max", 3, false, useGpu);
+    testSppLayer("avg", 5, false, useGpu);
+  }
+}
+
 TEST(Layer, rankCostLayer) {
   TestConfig config;
   config.layerConfig.set_type("rank-cost");
diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp
index 843eabc97d642..ddf99f6f2974c 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/math/Matrix.cpp
@@ -13,19 +13,19 @@ See the License for the specific language governing permissions and
 limitations under the License. 
*/ #include "Matrix.h" +#include "MathFunctions.h" #include "SparseMatrix.h" #include "SparseRowMatrix.h" -#include "MathFunctions.h" -#include #include #include +#include -#include "paddle/utils/Logging.h" #include #include "hl_gpu.h" #include "hl_table_apply.h" #include "hl_top_k.h" +#include "paddle/utils/Logging.h" #include "paddle/utils/ThreadLocal.h" @@ -42,9 +42,9 @@ inline real _safelog(real a) { return a > 0.0f ? std::log(a) : -40.0f; } Matrix::Matrix(MemoryHandlePtr memHandle, size_t height, size_t width, bool trans, bool use_gpu) : BaseMatrix( - height, width, - memHandle ? (reinterpret_cast(memHandle->getBuf())) : nullptr, - trans, use_gpu) { + height, width, + memHandle ? (reinterpret_cast(memHandle->getBuf())) : nullptr, + trans, use_gpu) { elementCnt_ = width * height; memoryHandle_ = memHandle; } @@ -95,7 +95,7 @@ MatrixPtr Matrix::create(MemoryHandlePtr memHandle, size_t height, size_t width, if (auto gpuHandle = std::dynamic_pointer_cast(memHandle)) { return std::make_shared(gpuHandle, height, width, trans); } else if (auto cpuHandle = - std::dynamic_pointer_cast(memHandle)) { + std::dynamic_pointer_cast(memHandle)) { return std::make_shared(cpuHandle, height, width, trans); } else { LOG(FATAL) << "Wrong"; @@ -343,19 +343,17 @@ void GpuMatrix::addBias(Matrix& b, real scale) { void GpuMatrix::collectBias(Matrix& a, real scale) { CHECK_EQ(getHeight(), (size_t)1); CHECK_EQ(width_, a.getWidth()); - GpuSparseMatrix* sMatPtr = dynamic_cast(&a); + GpuSparseMatrix* sMatPtr = dynamic_cast(&a); if (!sMatPtr) { sumCols(a, scale); } else { real* data = getData(); hl_sparse_matrix_s A_d = sMatPtr->sMatrix_.get(); - hl_sparse_matrix_column_sum(data, A_d, sMatPtr->getHeight(), - width_, scale); + hl_sparse_matrix_column_sum(data, A_d, sMatPtr->getHeight(), width_, scale); } } -void GpuMatrix::sequenceAvgForward(Matrix& a, - const IVector& startsPos, +void GpuMatrix::sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode) { size_t height = getHeight(); size_t width = getWidth(); @@ -401,8 +399,8 @@ void GpuMatrix::mul(const GpuMatrix& a, const GpuMatrix& b, real scaleAB, hl_trans_op_t transa = !a.isTransposed() ? HPPL_OP_N : HPPL_OP_T; hl_trans_op_t transb = !b.isTransposed() ? 
HPPL_OP_N : HPPL_OP_T; - hl_matrix_mul(A_d, transa, B_d, transb, C_d, dimM, dimN, dimK, - scaleAB, scaleT, lda, ldb, ldc); + hl_matrix_mul(A_d, transa, B_d, transb, C_d, dimM, dimN, dimK, scaleAB, + scaleT, lda, ldb, ldc); } void GpuMatrix::mul(const GpuSparseMatrix& a, const GpuMatrix& b, real scaleAB, @@ -423,8 +421,8 @@ void GpuMatrix::mul(const GpuSparseMatrix& a, const GpuMatrix& b, real scaleAB, hl_sparse_matrix_s A_d = a.sMatrix_.get(); real* B_d = b.data_; real* C_d = data_; - hl_matrix_csr_mul_dense(A_d, transA, B_d, HPPL_OP_N, C_d, height_, - width_, b.height_, scaleAB, scaleT); + hl_matrix_csr_mul_dense(A_d, transA, B_d, HPPL_OP_N, C_d, height_, width_, + b.height_, scaleAB, scaleT); } void GpuMatrix::mul(const GpuMatrix& a, const GpuSparseMatrix& b, real scaleAB, @@ -445,11 +443,11 @@ void GpuMatrix::mul(const GpuMatrix& a, const GpuSparseMatrix& b, real scaleAB, << "Matrix dimensions are not equal"; } if (b.format_ == SPARSE_CSC) { - hl_matrix_dense_mul_csc(A_d, HPPL_OP_N, B_d, transB, C_d, height_, - width_, a.width_, scaleAB, scaleT); + hl_matrix_dense_mul_csc(A_d, HPPL_OP_N, B_d, transB, C_d, height_, width_, + a.width_, scaleAB, scaleT); } else { - hl_matrix_dense_mul_csr(A_d, HPPL_OP_N, B_d, transB, C_d, height_, - width_, a.width_, scaleAB, scaleT); + hl_matrix_dense_mul_csr(A_d, HPPL_OP_N, B_d, transB, C_d, height_, width_, + a.width_, scaleAB, scaleT); } } @@ -511,8 +509,8 @@ void GpuMatrix::selectRows(Matrix& table, IVector& ids) { size_t tableSize = table.getHeight(); int* index = ids.getData(); - hl_matrix_select_rows(a, stride_, table.getData(), table.stride_, - index, numSamples, tableSize, dim); + hl_matrix_select_rows(a, stride_, table.getData(), table.stride_, index, + numSamples, tableSize, dim); #endif } @@ -529,8 +527,8 @@ void GpuMatrix::addToRows(Matrix& table, IVector& ids) { size_t tableSize = table.getHeight(); int* index = ids.getData(); - hl_matrix_add_to_rows(table.getData(), table.stride_, a, stride_, - index, numSamples, tableSize, dim); + hl_matrix_add_to_rows(table.getData(), table.stride_, a, stride_, index, + numSamples, tableSize, dim); #endif } @@ -565,13 +563,8 @@ void GpuMatrix::rowMax(IVector& maxIds, Matrix& maxVal) { CHECK_EQ(maxIds.getSize(), numSamples * beam); CHECK_EQ(maxVal.getHeight(), numSamples); - hl_matrix_top_k(maxVal.getData(), - maxVal.getStride(), - maxIds.getData(), - this->getData(), - this->getStride(), - this->getWidth(), - beam, + hl_matrix_top_k(maxVal.getData(), maxVal.getStride(), maxIds.getData(), + this->getData(), this->getStride(), this->getWidth(), beam, numSamples); #endif } @@ -595,12 +588,12 @@ void GpuMatrix::maxoutForward(Matrix& a, IVector& id, size_t channels, size_t size = getWidth(); size_t batchSize = getHeight(); - const real* input = a.getData(); + const real* input = a.getData(); real* output = getData(); int* idForGpu = id.getData(); - hl_maxout_forward(input, output, idForGpu, batchSize, size, - size / channels, groups); + hl_maxout_forward(input, output, idForGpu, batchSize, size, size / channels, + groups); } void GpuMatrix::maxoutBackward(Matrix& a, IVector& id, size_t channels, @@ -611,12 +604,12 @@ void GpuMatrix::maxoutBackward(Matrix& a, IVector& id, size_t channels, size_t size = a.getWidth(); size_t batchSize = getHeight(); - real* input = getData(); + real* input = getData(); const real* output = a.getData(); const int* idForGpu = id.getData(); - hl_maxout_backward(input, output, idForGpu, batchSize, size, - size / channels, groups); + hl_maxout_backward(input, output, idForGpu, 
batchSize, size, size / channels, + groups); } /*calulate the error of classification */ @@ -632,8 +625,8 @@ void GpuMatrix::classificationError(MatrixPtr output, IVectorPtr label) { real* recResult_d = data_; int* label_d = label_ptr->getData(); - hl_matrix_classification_error(output_d, label_d, recResult_d, - height_, output_ptr->width_); + hl_matrix_classification_error(output_d, label_d, recResult_d, height_, + output_ptr->width_); } /* copy -log(output[i * width + label]) to this->data[i] */ @@ -702,8 +695,7 @@ void GpuMatrix::sequenceSoftmax(Matrix& output, const IVector& index) { real* outputData = output.getData(); auto starts = index.getData(); int numSequences = index.getSize() - 1; - hl_sequence_softmax_forward(inputData, outputData, - starts, numSequences); + hl_sequence_softmax_forward(inputData, outputData, starts, numSequences); } void GpuMatrix::softmaxDerivative(Matrix& output, Matrix& sftmaxSum) { @@ -717,8 +709,7 @@ void GpuMatrix::softmaxDerivative(Matrix& output, Matrix& sftmaxSum) { real* output_d = output.data_; real* sftmaxSum_d = sftmaxSum.data_; real* grad_d = data_; - hl_matrix_softmax_derivative(grad_d, output_d, sftmaxSum_d, height_, - width_); + hl_matrix_softmax_derivative(grad_d, output_d, sftmaxSum_d, height_, width_); } void GpuMatrix::softmaxBackward(Matrix& outputV) { @@ -769,7 +760,7 @@ void GpuMatrix::scaledTanh(Matrix& output, real p1, real p2) { } void GpuMatrix::cosSim(Matrix& output1, Matrix& output2, real scale) { CHECK(output1.useGpu_ == true && output2.useGpu_ == true) - << "Matrix type are not equal"; + << "Matrix type are not equal"; size_t numSamples = getHeight(); size_t dim = output1.getWidth(); CHECK_EQ(getWidth(), 1UL); @@ -778,15 +769,15 @@ void GpuMatrix::cosSim(Matrix& output1, Matrix& output2, real scale) { real* out = getData(); real* x = output1.getData(); real* y = output2.getData(); - hl_cossim(out, x, y, - dim, output1.getHeight(), output2.getHeight(), scale); + hl_cossim(out, x, y, dim, output1.getHeight(), output2.getHeight(), scale); } void GpuMatrix::cosSimDerivative(Matrix& output, Matrix& prevOut1, Matrix& prevOut2, Matrix& prevGrad1, Matrix& prevGrad2, real scale) { CHECK(output.useGpu_ == true && prevOut1.useGpu_ == true && prevOut2.useGpu_ == true && prevGrad1.useGpu_ == true && - prevGrad2.useGpu_ == true) << "Matrix type are not equal"; + prevGrad2.useGpu_ == true) + << "Matrix type are not equal"; CHECK_EQ(getWidth(), 1UL); CHECK_EQ(output.getWidth(), 1UL); @@ -806,9 +797,8 @@ void GpuMatrix::cosSimDerivative(Matrix& output, Matrix& prevOut1, real* prevOutY = prevOut2.getData(); real* prevGradX = prevGrad1.getData(); real* prevGradY = prevGrad2.getData(); - hl_cossim_derivative(grad, out, prevOutX, prevOutY, - prevGradX, prevGradY, dim, - prevOut1.getHeight(), prevOut2.getHeight(), scale); + hl_cossim_derivative(grad, out, prevOutX, prevOutY, prevGradX, prevGradY, dim, + prevOut1.getHeight(), prevOut2.getHeight(), scale); } void GpuMatrix::randomizeUniform() { @@ -859,8 +849,8 @@ void GpuMatrix::check(std::ostream& os, Matrix& refMat, bool printDiff) { void GpuMatrix::convExpand(Matrix& feature, int feaImgHeight, int feaImgWidth, int channels, int blockH, int blockW, int strideH, - int strideW, int paddingH, int paddingW, - int outputH, int outputW) { + int strideW, int paddingH, int paddingW, int outputH, + int outputW) { CHECK(feature.useGpu_ == true) << "Matrix type are not equal"; CHECK_EQ(size_t(feaImgHeight * feaImgWidth * channels), @@ -870,17 +860,16 @@ void GpuMatrix::convExpand(Matrix& feature, int 
feaImgHeight, int feaImgWidth, size_t elemCnt = outputH * outputW * blockH * blockW * channels; CHECK_EQ(elemCnt, height_ * width_) << "Matrix dimensions are not equal"; - hl_expand_feature2col(feature.getData(), channels, feaImgHeight, - feaImgWidth, blockH, blockW, strideH, strideW, - paddingH, paddingW, outputH, outputW, - getData()); + hl_expand_feature2col(feature.getData(), channels, feaImgHeight, feaImgWidth, + blockH, blockW, strideH, strideW, paddingH, paddingW, + outputH, outputW, getData()); } void GpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, int thisImgWidth, int channels, int blockH, int blockW, int strideH, int strideW, int paddingH, - int paddingW, int outputH, int outputW, - real alpha, real beta) { + int paddingW, int outputH, int outputW, real alpha, + real beta) { CHECK(expandFeat.useGpu_ == true) << "Matrix type are not equal"; CHECK_EQ(size_t(thisImgHeight * thisImgWidth * channels), getHeight() * getWidth()) @@ -889,18 +878,17 @@ void GpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, size_t elemCnt = outputH * outputW * blockW * blockH * channels; CHECK(elemCnt == expandFeat.getHeight() * expandFeat.getWidth()) << "Matrix dimensions are not equal"; - hl_shrink_col2feature( - expandFeat.getData(), channels, thisImgHeight, thisImgWidth, blockH, - blockW, strideH, strideW, paddingH, paddingW, outputH, outputW, - getData(), alpha, beta); + hl_shrink_col2feature(expandFeat.getData(), channels, thisImgHeight, + thisImgWidth, blockH, blockW, strideH, strideW, + paddingH, paddingW, outputH, outputW, getData(), alpha, + beta); } void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, - size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, - size_t paddingH, size_t paddingW) { + size_t imgSizeW, size_t channels, size_t sizeX, + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, size_t paddingH, + size_t paddingW) { CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal"; real* inputData = inputMat.getData(); @@ -911,16 +899,15 @@ void GpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, CHECK(height_ == inputMat.getHeight()); CHECK(width_ == outputH * outputW * channels); - hl_maxpool_forward(frameNum, inputData, channels, height, width, - outputH, outputW, sizeX, sizeY, strideH, strideW, - paddingH, paddingW, data_); + hl_maxpool_forward(frameNum, inputData, channels, height, width, outputH, + outputW, sizeX, sizeY, strideH, strideW, paddingH, + paddingW, data_, getStride()); } void GpuMatrix::maxPoolBackward(Matrix& inputMat, size_t imgSizeH, size_t imgSizeW, Matrix& outGrad, Matrix& outV, - size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, + size_t sizeX, size_t sizeY, size_t strideH, + size_t strideW, size_t outputH, size_t outputW, real scaleTargets, real scaleOutput, size_t paddingH, size_t paddingW) { CHECK(inputMat.useGpu_ == true && outGrad.useGpu_ == true && @@ -940,19 +927,17 @@ void GpuMatrix::maxPoolBackward(Matrix& inputMat, size_t imgSizeH, CHECK(outGrad.getHeight() == outV.getHeight() && outGrad.getWidth() == outV.getWidth()); - - hl_maxpool_backward(frameNum, inputData, outData, outDiff, channels, - height, width, outputH, outputW, sizeX, sizeY, - strideH, strideW, paddingH, paddingW, - scaleTargets, scaleOutput, data_); + hl_maxpool_backward(frameNum, inputData, outData, outDiff, channels, height, + width, outputH, outputW, sizeX, sizeY, strideH, strideW, + 
paddingH, paddingW, scaleTargets, scaleOutput, data_, + outGrad.getStride()); } void GpuMatrix::avgPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, - size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, - size_t paddingH, size_t paddingW) { + size_t imgSizeW, size_t channels, size_t sizeX, + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, size_t paddingH, + size_t paddingW) { CHECK(inputMat.useGpu_ == true) << "Matrix type are not equal"; real* inputData = inputMat.getData(); @@ -963,18 +948,17 @@ void GpuMatrix::avgPoolForward(Matrix& inputMat, size_t imgSizeH, CHECK(height_ == inputMat.getHeight()); CHECK(width_ == outputH * outputW * channels); - hl_avgpool_forward(frameNum, inputData, channels, height, width, - outputH, outputW, sizeX, sizeY, - strideH, strideW, - paddingH, paddingW, data_); + hl_avgpool_forward(frameNum, inputData, channels, height, width, outputH, + outputW, sizeX, sizeY, strideH, strideW, paddingH, + paddingW, data_, getStride()); } void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH, size_t imgSizeW, size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, - real scaleTargets, real scaleOutput, - size_t paddingH, size_t paddingW) { + size_t strideH, size_t strideW, size_t outputH, + size_t outputW, real scaleTargets, + real scaleOutput, size_t paddingH, + size_t paddingW) { CHECK(outGrad.useGpu_ == true) << "Matrix type are not equal"; real* outDiff = outGrad.getData(); @@ -986,11 +970,10 @@ void GpuMatrix::avgPoolBackward(Matrix& outGrad, size_t imgSizeH, CHECK(height_ == outGrad.getHeight()); CHECK(outGrad.getWidth() == outputH * outputW * channels); - hl_avgpool_backward(frameNum, outDiff, channels, height, width, - outputH, outputW, sizeX, sizeY, - strideH, strideW, paddingH, paddingW, - scaleTargets, scaleOutput, - data_); + hl_avgpool_backward(frameNum, outDiff, channels, height, width, outputH, + outputW, sizeX, sizeY, strideH, strideW, paddingH, + paddingW, scaleTargets, scaleOutput, data_, + outGrad.getStride()); } void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH, @@ -1005,8 +988,8 @@ void GpuMatrix::crossMapNormalFwd(Matrix& input, size_t imgSizeH, CHECK(denoms.getHeight() == input.getHeight() && denoms.getWidth() == input.getWidth() && input.getHeight() == height_ && input.getWidth() == width_); - hl_CMRNorm_forward(num, input.getData(), denoms.getData(), data_, - channels, height, width, sizeX, scale, -pow); + hl_CMRNorm_forward(num, input.getData(), denoms.getData(), data_, channels, + height, width, sizeX, scale, -pow); } void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, @@ -1026,13 +1009,11 @@ void GpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, denoms.getWidth() == localGrad.getWidth()); hl_CMRNorm_backward(num, preOutV.getData(), denoms.getData(), - localOutV.getData(), localGrad.getData(), data_, - channels, height, width, sizeX, -pow, - 2.0f * pow * scale); + localOutV.getData(), localGrad.getData(), data_, channels, + height, width, sizeX, -pow, 2.0f * pow * scale); } -void GpuMatrix::maxSequenceForward(Matrix& input, - const IVector& sequence, +void GpuMatrix::maxSequenceForward(Matrix& input, const IVector& sequence, IVector& index) { CHECK(dynamic_cast(&input)); CHECK(dynamic_cast(&sequence)); @@ -1049,12 +1030,11 @@ void GpuMatrix::maxSequenceForward(Matrix& input, CHECK_EQ(numSequences, sequence.getSize() - 1); CHECK_EQ(numSequences 
* dim, index.getSize()); - hl_max_sequence_forward(inputData, starts, outData, maxIndex, - numSequences, dim); + hl_max_sequence_forward(inputData, starts, outData, maxIndex, numSequences, + dim); } -void GpuMatrix::maxSequenceBackward(Matrix& outputGrad, - const IVector& sequence, +void GpuMatrix::maxSequenceBackward(Matrix& outputGrad, const IVector& sequence, IVector& index) { CHECK(dynamic_cast(&outputGrad)); CHECK(dynamic_cast(&sequence)); @@ -1111,9 +1091,8 @@ void GpuMatrix::contextProjectionBackwardData(MatrixPtr inputGrad, real* inGrad = inputGrad->getData(); const int* starts = sequence.getData(); - hl_context_projection_backward_data(outGrad, starts, inGrad, - numSequences, inputDim, - contextLength, contextStart); + hl_context_projection_backward_data(outGrad, starts, inGrad, numSequences, + inputDim, contextLength, contextStart); } void GpuMatrix::contextProjectionBackwardWeight(MatrixPtr weightGrad, @@ -1133,9 +1112,9 @@ void GpuMatrix::contextProjectionBackwardWeight(MatrixPtr weightGrad, real* wtGrad = weightGrad->getData(); const int* starts = sequence.getData(); - hl_context_projection_backward_weight( - outGrad, starts, wtGrad, numSequences, weightDim, totalPad, contextLength, - contextStart, beginPad); + hl_context_projection_backward_weight(outGrad, starts, wtGrad, numSequences, + weightDim, totalPad, contextLength, + contextStart, beginPad); } void GpuMatrix::paramReluForward(Matrix& data, Matrix& W) { @@ -1147,8 +1126,7 @@ void GpuMatrix::paramReluForward(Matrix& data, Matrix& W) { size_t numSamples = data.getHeight(); size_t partial_sum = numElements / (W.getHeight() * W.getWidth()); real* output = getData(); - hl_param_relu_forward(output, input, w, numElements, numSamples, - partial_sum); + hl_param_relu_forward(output, input, w, numElements, numSamples, partial_sum); } void GpuMatrix::paramReluBackwardW(Matrix& oGrad, Matrix& data) { @@ -1160,8 +1138,8 @@ void GpuMatrix::paramReluBackwardW(Matrix& oGrad, Matrix& data) { size_t numElements = data.getWidth(); size_t numSamples = data.getHeight(); size_t partial_sum = numElements / (this->getHeight() * this->getWidth()); - hl_param_relu_backward_w(wgrad, ograd, input, - numElements, numSamples, partial_sum); + hl_param_relu_backward_w(wgrad, ograd, input, numElements, numSamples, + partial_sum); } void GpuMatrix::paramReluBackwardDiff(Matrix& oGrad, Matrix& data, Matrix& W) { @@ -1172,8 +1150,8 @@ void GpuMatrix::paramReluBackwardDiff(Matrix& oGrad, Matrix& data, Matrix& W) { size_t numElements = data.getWidth(); size_t numSamples = data.getHeight(); size_t partial_sum = numElements / (W.getHeight() * W.getWidth()); - hl_param_relu_backward_diff(ograd, input, w, diff, - numElements, numSamples, partial_sum); + hl_param_relu_backward_diff(ograd, input, w, diff, numElements, numSamples, + partial_sum); } void GpuMatrix::addColumnVector(const Matrix& b) { @@ -1422,8 +1400,8 @@ void CpuMatrix::transpose(MatrixPtr matTrans, bool memAlloc) { void CpuMatrix::convExpand(Matrix& feature, int feaImgHeight, int feaImgWidth, int channels, int blockH, int blockW, int strideH, - int strideW, int paddingH, int paddingW, - int outputH, int outputW) { + int strideW, int paddingH, int paddingW, int outputH, + int outputW) { CHECK(feature.useGpu_ == false) << "Matrix type are not equal"; CHECK_EQ(size_t(feaImgHeight * feaImgWidth * channels), @@ -1463,8 +1441,8 @@ void CpuMatrix::convExpand(Matrix& feature, int feaImgHeight, int feaImgWidth, void CpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, int thisImgWidth, int 
channels, int blockH, int blockW, int strideH, int strideW, int paddingH, - int paddingW, int outputH, int outputW, - real alpha, real beta) { + int paddingW, int outputH, int outputW, real alpha, + real beta) { CHECK(expandFeat.useGpu_ == false) << "Matrix type are not equal"; CHECK_EQ(size_t(thisImgHeight * thisImgWidth * channels), getHeight() * getWidth()) @@ -1501,11 +1479,10 @@ void CpuMatrix::convShrink(Matrix& expandFeat, int thisImgHeight, } void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, - size_t imgSizeW, size_t channels, - size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, - size_t paddingH, size_t paddingW) { + size_t imgSizeW, size_t channels, size_t sizeX, + size_t sizeY, size_t strideH, size_t strideW, + size_t outputH, size_t outputW, size_t paddingH, + size_t paddingW) { real* inputData = inputMat.getData(); real* outData = data_; size_t num = inputMat.getHeight(); @@ -1513,15 +1490,20 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, size_t inHeight = imgSizeH; CHECK(inHeight * inWidth == inputMat.getWidth() / channels); CHECK_EQ(num, this->getHeight()); - CHECK_EQ(channels*outputH*outputW, this->getWidth()); + CHECK_EQ(channels * outputH * outputW, this->getWidth()); /* initialize the data_ */ - for (size_t i = 0; i < height_ * width_; i++) { - outData[i] = -(real)FLT_MAX; + for (size_t i = 0; i < height_; i++) { + for (size_t j = 0; j < width_; j++) { + outData[i * getStride() + j] = -(real)FLT_MAX; + } } /* pool max one by one */ - for (size_t n = 0; n < num; ++n) { // frame by frame + for (size_t n = 0; n < num; ++n) { // frame by frame + if (!isContiguous()) { + outData = data_ + n * getStride(); + } for (size_t c = 0; c < channels; ++c) { // channel by channel for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { @@ -1564,6 +1546,10 @@ void CpuMatrix::maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, real* otData = outV.getData(); real* otGrad = outGrad.getData(); for (size_t n = 0; n < num; ++n) { + if (!outV.isContiguous()) { + otData = outV.getData() + n * outV.getStride(); + otGrad = outGrad.getData() + n * outGrad.getStride(); + } for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { @@ -1594,9 +1580,9 @@ void CpuMatrix::maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, size_t channels, size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, - size_t paddingH, size_t paddingW) { + size_t strideH, size_t strideW, size_t outputH, + size_t outputW, size_t paddingH, + size_t paddingW) { // The main loop size_t num = input.getHeight(); size_t inHeight = imgSizeH; @@ -1607,6 +1593,9 @@ void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, real* inData = input.getData(); for (size_t n = 0; n < num; ++n) { + if (!isContiguous()) { + tgtData = data_ + n * getStride(); + } for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { @@ -1638,9 +1627,8 @@ void CpuMatrix::avgPoolForward(Matrix& input, size_t imgSizeH, size_t imgSizeW, } void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW, - size_t sizeX, size_t sizeY, - size_t strideH, size_t strideW, - size_t outputH, size_t outputW, + size_t sizeX, size_t sizeY, size_t strideH, + 
size_t strideW, size_t outputH, size_t outputW, real scaleTargets, real scaleOutput, size_t paddingH, size_t paddingW) { size_t num = input.getHeight(); @@ -1650,6 +1638,9 @@ void CpuMatrix::avgPoolBackward(Matrix& input, size_t imgSizeH, size_t imgSizeW, real* outData = getData(); for (size_t n = 0; n < num; ++n) { + if (!input.isContiguous()) { + inData = input.getData() + n * input.getStride(); + } for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { for (size_t pw = 0; pw < outputW; ++pw) { @@ -1752,8 +1743,7 @@ void CpuMatrix::crossMapNormalBwd(Matrix& localGrad, Matrix& denoms, * Output: output size is the number of input sequences (NOT input instances). * output[i] is set to max_{for each instance in this sequence}{input[i]} */ -void CpuMatrix::maxSequenceForward(Matrix& input, - const IVector& sequence, +void CpuMatrix::maxSequenceForward(Matrix& input, const IVector& sequence, IVector& index) { CHECK(dynamic_cast(&input)); CHECK(dynamic_cast(&sequence)); @@ -1794,8 +1784,7 @@ void CpuMatrix::maxSequenceForward(Matrix& input, } } -void CpuMatrix::maxSequenceBackward(Matrix& outputGrad, - const IVector& sequence, +void CpuMatrix::maxSequenceBackward(Matrix& outputGrad, const IVector& sequence, IVector& index) { CHECK(dynamic_cast(&outputGrad)); CHECK(dynamic_cast(&sequence)); @@ -2000,8 +1989,7 @@ void CpuMatrix::collectBias(Matrix& a, real scale) { } } -void CpuMatrix::sequenceAvgForward(Matrix& a, - const IVector& startsPos, +void CpuMatrix::sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode) { size_t height = getHeight(); size_t width = getWidth(); @@ -2592,7 +2580,7 @@ void SharedCpuMatrix::mul(CpuSparseMatrix* a, CpuMatrix* b, real scaleAB, blockSeq.push_back(k); } std::shuffle(blockSeq.begin(), blockSeq.end(), - ThreadLocalRandomEngine::get()); + ThreadLocalRandomEngine::get()); } std::vector& localBufRows = *localBufRows_; int* cols = a->getCols(); @@ -2823,7 +2811,7 @@ void CpuMatrix::maxoutForward(Matrix& a, IVector& id, size_t channels, size_t size = getWidth(); size_t batchSize = getHeight(); size_t featLen = size / channels; - const real* input = a.getData(); + const real* input = a.getData(); int* idForCpu = id.getData(); MatrixPtr maxInMat, maxOutMat; @@ -2857,8 +2845,8 @@ void CpuMatrix::maxoutBackward(Matrix& a, IVector& id, size_t channels, size_t batchSize = getHeight(); size_t featLen = size / channels; size_t newFeatLen = groups * featLen; - real* inputG = getData(); - const real* outG = a.getData(); + real* inputG = getData(); + const real* outG = a.getData(); int* idForCpu = id.getData(); for (size_t batch_idx = 0; batch_idx < batchSize; ++batch_idx) { @@ -3082,9 +3070,9 @@ void CpuMatrix::sequenceSoftmax(Matrix& output, const IVector& index) { CHECK(isContiguous()); MatrixPtr inTmp = Matrix::create(nullptr, /* height= */ 1, 1, - /* trans= */ false, false); + /* trans= */ false, false); MatrixPtr outTmp = Matrix::create(nullptr, /* height= */ 1, 1, - /* trans= */ false, false); + /* trans= */ false, false); size_t numSequences = index.getSize() - 1; auto starts = index.getData(); for (size_t i = 0; i < numSequences; ++i) { diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 70c1f8d563238..5dac2f8204190 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -120,6 +120,14 @@ message PoolConfig { optional uint32 padding_y = 13 [default = 0]; } +message SppConfig { + required string pool_type = 1; + required uint32 pyramid_height = 2; + required uint32 channels = 3; + required 
uint32 img_size = 4; + optional uint32 img_size_y = 5; +} + message NormConfig { // rnorm or cmrnorm required string norm_type = 1; @@ -194,6 +202,9 @@ message ProjectionConfig { optional ConvConfig conv_conf = 8; optional int32 num_filters = 9; + // For pool + optional PoolConfig pool_conf = 10; + // For IdentityOffsetProjection optional uint64 offset = 11 [default = 0]; } @@ -235,6 +246,7 @@ message LayerInputConfig { // Set the argument name. optional string input_layer_argument = 9; optional MaxOutConfig maxout_conf = 10; + optional SppConfig spp_conf = 11; } message LayerConfig { From 45c81a414f912b93c6ad34fa4045e8e80fd396f6 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 2 Nov 2016 09:56:20 +0800 Subject: [PATCH 060/180] Add job=time in trainer, refine cudnn_conv to reduce gpu memory and speed up training. (#218) * Add benchmark for PaddlePaddle, tensorflow and caffe * ConvProjection to reduce memory for goolenet * Add unit test for ConvProjection. 1. unit test in test_LayerGrad. 2. compare the ConvPorjection and CudnnConvLayer, also compare the concat_layer+img_conv_layer and concat_layer_conv_projection. * Reduce cudnn_conv memory and add benchmark document. 1. Use TmpMatrix as the workspace in cudnn_conv to reduce gpu memory. It reduce lots of memory. 2. Add benchmark document. 3. fix smallnet_mnist_cifar.py in paddle. * Add job=time and refine cudnn_conv to reduce gpu memroy and speed up * Refine cudnn_conv and shared biases operation in concat_layer and mixed_layer. * follow comments * follow comments * Use unique_ptr to prevent memory leaks in CudnnConvLayer. --- doc/ui/cmd_argument/argument_outline.md | 7 +- doc/ui/cmd_argument/detail_introduction.md | 4 + paddle/cuda/include/hl_device_functions.cuh | 19 ++ paddle/cuda/include/hl_matrix.h | 36 +++ paddle/cuda/include/stub/hl_matrix_stub.h | 13 + paddle/cuda/src/hl_cuda_cudnn.cc | 7 +- paddle/cuda/src/hl_cuda_matrix.cu | 87 ++++++ paddle/cuda/src/hl_cuda_sparse.cuh | 18 -- paddle/gserver/layers/ConcatenateLayer.cpp | 33 ++- paddle/gserver/layers/ConvBaseLayer.cpp | 51 ++-- paddle/gserver/layers/ConvBaseLayer.h | 24 +- paddle/gserver/layers/ConvProjection.cpp | 210 ++++++++++++++ paddle/gserver/layers/ConvProjection.h | 125 +++++++++ paddle/gserver/layers/CudnnConvLayer.cpp | 256 +++--------------- paddle/gserver/layers/CudnnConvLayer.h | 73 +---- paddle/gserver/layers/ExpandConvLayer.cpp | 39 ++- paddle/gserver/layers/ExpandConvLayer.h | 10 +- paddle/gserver/layers/MixedLayer.cpp | 20 +- paddle/gserver/layers/MixedLayer.h | 1 + paddle/gserver/tests/LayerGradUtil.cpp | 6 +- paddle/gserver/tests/LayerGradUtil.h | 3 +- paddle/gserver/tests/img_conv_a.conf | 39 +++ paddle/gserver/tests/img_conv_b.conf | 32 +++ paddle/gserver/tests/test_LayerGrad.cpp | 39 +++ paddle/gserver/tests/test_NetworkCompare.cpp | 9 + paddle/math/Matrix.cpp | 52 ++++ paddle/math/Matrix.h | 28 ++ paddle/math/tests/test_matrixCompare.cpp | 56 ++++ paddle/trainer/CMakeLists.txt | 1 + paddle/trainer/Trainer.h | 1 + paddle/trainer/TrainerBenchmark.cpp | 71 +++++ paddle/trainer/TrainerMain.cpp | 2 + proto/ModelConfig.proto.m4 | 5 +- python/paddle/trainer/config_parser.py | 68 ++++- .../paddle/trainer_config_helpers/layers.py | 105 ++++++- .../paddle/trainer_config_helpers/networks.py | 165 ++++++++++- .../tests/configs/check.md5 | 9 +- .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/test_bi_grumemory.py | 10 + 39 files changed, 1340 insertions(+), 396 deletions(-) create mode 100644 paddle/gserver/layers/ConvProjection.cpp create mode 100644 
paddle/gserver/layers/ConvProjection.h create mode 100644 paddle/gserver/tests/img_conv_a.conf create mode 100644 paddle/gserver/tests/img_conv_b.conf create mode 100644 paddle/trainer/TrainerBenchmark.cpp create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py diff --git a/doc/ui/cmd_argument/argument_outline.md b/doc/ui/cmd_argument/argument_outline.md index 98dadc270dcac..d6cc2c6ed7cc1 100644 --- a/doc/ui/cmd_argument/argument_outline.md +++ b/doc/ui/cmd_argument/argument_outline.md @@ -183,7 +183,7 @@ It looks like there are a lot of arguments. However, most of them are for develo -GPUgpu_id +GPUgpu_id √√√√ @@ -207,6 +207,11 @@ It looks like there are a lot of arguments. However, most of them are for develo √√√√ + +cudnn_conv_workspace_limit_in_mb +√√√√ + + RNN beam_size diff --git a/doc/ui/cmd_argument/detail_introduction.md b/doc/ui/cmd_argument/detail_introduction.md index 0d0362d022a72..07608e5edf740 100644 --- a/doc/ui/cmd_argument/detail_introduction.md +++ b/doc/ui/cmd_argument/detail_introduction.md @@ -163,6 +163,10 @@ - Choose path to dynamic load NVIDIA CUDA library, for instance, /usr/local/cuda/lib64. [Default]: LD_LIBRARY_PATH - type: string (default: "", null) +* `--cudnn_conv_workspace_limit_in_mb` + - Specify cuDNN max workspace limit, in units MB, 4096MB=4GB by default. + - type: int32 (default: 4096MB=4GB) + ## NLP: RNN/LSTM/GRU * `--rnn_use_batch` - Whether to use batch method for calculation in simple RecurrentLayer. diff --git a/paddle/cuda/include/hl_device_functions.cuh b/paddle/cuda/include/hl_device_functions.cuh index 88d950d6c1713..159c26f443cb1 100755 --- a/paddle/cuda/include/hl_device_functions.cuh +++ b/paddle/cuda/include/hl_device_functions.cuh @@ -48,5 +48,24 @@ inline __device__ double paddleAtomicAdd(double* address, double val) { } } // namespace paddle +/** + * @brief sum reduction + * + * @param[in,out] smem input data, better to use __shared__ memory. + * @param[in] tid thread index. + * @param[in] threads the total thread number used to reduce, + * such as, blockDim.x. + * + * @return smem[0]: the sum of each elements in smem. + */ +__device__ __forceinline__ +void simpleReduce(real* smem, int tid, int threads) { + for (unsigned int s = threads / 2; s > 0; s >>= 1) { + if (tid < s) { + smem[tid] += smem[tid + s]; + } + __syncthreads(); + } +} #endif /* HL_DEVICE_FUNCTIONS_CUH_ */ diff --git a/paddle/cuda/include/hl_matrix.h b/paddle/cuda/include/hl_matrix.h index 17419790471a7..71e8f8e3a60c9 100644 --- a/paddle/cuda/include/hl_matrix.h +++ b/paddle/cuda/include/hl_matrix.h @@ -229,4 +229,40 @@ extern void hl_cossim_derivative(real* grad, int input2_height, real scale); +/** + * @brief Matrix addition: A_d[i][j] += scale * B_d[j/channel]. + * + * @param[in] A_d input matrix (M x N). + * @param[in] B_d input matrix (1 x channel). + * @param[in] channel width of B. + * @param[in] dimM height of A. + * @param[in] dimN width of A. + * @param[in] scale scalar used for addition. + * + */ +extern void hl_matrix_add_shared_bias(real* A_d, + real* B_d, + const int channel, + const int dimM, + const int dimN, + real scale); + +/** + * @brief Matrix addition: A_d[i][j] += scale * B_d[j/channel]. + * + * @param[in] B_d input matrix (1 x channel). + * @param[in] A_d input matrix (M x N). + * @param[in] channel width of B. + * @param[in] dimM height of A. + * @param[in] dimN width of A. + * @param[in] scale scalar used for addition. 
+ * + */ +extern void hl_matrix_collect_shared_bias(real* B_d, + real* A_d, + const int channel, + const int dimM, + const int dimN, + real scale); + #endif /* HL_MATRIX_H_ */ diff --git a/paddle/cuda/include/stub/hl_matrix_stub.h b/paddle/cuda/include/stub/hl_matrix_stub.h index f1f1020c84d46..e37b1275432ca 100644 --- a/paddle/cuda/include/stub/hl_matrix_stub.h +++ b/paddle/cuda/include/stub/hl_matrix_stub.h @@ -101,4 +101,17 @@ inline void hl_cossim_derivative(real* grad, int input2_height, real scale) {} +inline void hl_matrix_add_shared_bias(real* A_d, + real* B_d, + const int channel, + const int dimM, + const int dimN, + real scale) {} + +inline void hl_matrix_collect_shared_bias(real* B_d, + real* A_d, + const int channel, + const int dimM, + const int dimN, + real scale) {} #endif // HL_MATRIX_STUB_H_ diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index b215c0f6e33a1..7810d0d10053d 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -20,6 +20,11 @@ limitations under the License. */ #include "hl_thread.ph" #include "hl_dso_loader.h" #include "paddle/utils/Logging.h" +#include "paddle/utils/CommandLineParser.h" + +P_DEFINE_int32(cudnn_conv_workspace_limit_in_mb, 4096, + "Specify cuDNN max workspace limit, in units MB, " + "4096MB=4GB by default."); namespace dynload { @@ -242,7 +247,7 @@ void hl_conv_workspace(hl_tensor_descriptor input, CHECK_NOTNULL(conv); // Specify workspace limit directly - size_t memoryLimitBytes = 8 * 1024 * 1024; + size_t memoryLimitBytes = (1LL << 20) * FLAGS_cudnn_conv_workspace_limit_in_mb; // cudnn convolution forward configuration cudnnTensorDescriptor_t fwd_src_desc = GET_TENSOR_DESCRIPTOR(input); diff --git a/paddle/cuda/src/hl_cuda_matrix.cu b/paddle/cuda/src/hl_cuda_matrix.cu index 067e68c41e119..3df9f63f9e4b7 100644 --- a/paddle/cuda/src/hl_cuda_matrix.cu +++ b/paddle/cuda/src/hl_cuda_matrix.cu @@ -20,6 +20,7 @@ limitations under the License. 
*/ #include "hl_sequence.h" #include "paddle/utils/Logging.h" #include "hl_device_functions.cuh" +#include "hl_gpu_matrix_kernel.cuh" DEFINE_MATRIX_UNARY_OP(Zero, a = 0); DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1*a + p2*b); @@ -673,3 +674,89 @@ void hl_cossim_derivative(real* grad, input1_height, input2_height, scale); CHECK_SYNC("hl_cossim_derivate failed"); } + +__global__ void KeMatrixAddSharedBias(real* A, + real* B, + const int channel, + const int M, + const int N, + real scale) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + int dim = N / channel; + if (index < M * N) { + int i = index % N; + i = i / dim; + A[index] += scale * B[i]; + } +} + +void hl_matrix_add_shared_bias(real* A_d, + real* B_d, + const int channel, + const int dimM, + const int dimN, + real scale) { + const int blocks = 512; + const int grids = DIVUP(dimM * dimN, blocks); + KeMatrixAddSharedBias<<>> + (A_d, B_d, channel, dimM, dimN, scale); + CHECK_SYNC("hl_matrix_add_shared_bias failed"); +} + + +template +__global__ void KeMatrixCollectSharedBias(real *B, + real *A, + const int channel, + const int M, + const int N, + const int dim, + const int limit, + real scale) { + if (dim < limit) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < channel) { + real sum = 0.0; + for (int i = 0; i < M; ++i) { + for (int j = 0; j < dim; ++j) { + sum += A[i * N + index * dim + j]; + } + } + B[index] += scale * sum; + } + } else { + const int tid = threadIdx.x; + const int bid = blockIdx.x; + __shared__ real smem[blockSize]; + real sum = 0.0; + for (int j = 0; j < ((dim * M + blockSize - 1) / blockSize); ++j) { + int n = j * blockSize + tid; + int m = n / dim; + int w = n % dim; + smem[tid] = (m < M && w < dim) ? A[m * N + bid * dim + w] : 0.0; + __syncthreads(); + simpleReduce(smem, tid, blockSize); + sum += smem[0]; + } + if (tid == 0) { + B[bid] += scale * sum; + } + } +} + +void hl_matrix_collect_shared_bias(real* B_d, + real* A_d, + const int channel, + const int dimM, + const int dimN, + real scale) { + const int dim = dimN / channel; + const int blocks = 256; + const int limit = 64; + int grids = (dimM * dim) < limit ? DIVUP(channel, blocks) : channel; + + KeMatrixCollectSharedBias + <<< grids, blocks, 0, STREAM_DEFAULT>>> + (B_d, A_d, channel, dimM, dimN, dim, limit, scale); + CHECK_SYNC("hl_matrix_collect_shared_bias failed"); +} diff --git a/paddle/cuda/src/hl_cuda_sparse.cuh b/paddle/cuda/src/hl_cuda_sparse.cuh index c3b98f4ebc38d..9cf2d5a843343 100644 --- a/paddle/cuda/src/hl_cuda_sparse.cuh +++ b/paddle/cuda/src/hl_cuda_sparse.cuh @@ -908,24 +908,6 @@ int findIndex(int* indice, int num, int index) { return (end - 1); } -/** - * @brief sum reduction - * - * @param[in,out] smem input data, better to use __shared__ memory. - * @param[in] tid local thread index. - * @param[in] blockDimX the size of blockDim.x. - * - * note: return smem[0]: the sum of each elements of smem. - */ -__device__ __forceinline__ -void reduce(real* smem, int tid, int blockDimX) { - for (unsigned int s = blockDimX / 2; s > 0; s >>= 1) { - if (tid < s) { - smem[tid] += smem[tid + s]; - } - __syncthreads(); - } -} /** * @brief sum columns of csr sparse matrix (csr_val), then add to a_val. 
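The two kernels added above, KeMatrixAddSharedBias and KeMatrixCollectSharedBias, implement the shared-bias forward addition and the matching bias-gradient reduction used by the shared-bias paths this patch introduces. A rough host-side sketch of their semantics (illustrative only, not code from this patch; it assumes row-major data, dimN divisible by channel, and plain float standing in for the build-dependent real type):

// A[i][j] += scale * B[j / dim], with dim = dimN / channel: one bias value is
// shared by the dim output columns belonging to a single feature map.
inline void addSharedBiasRef(float* A, const float* B, int channel,
                             int dimM, int dimN, float scale) {
  const int dim = dimN / channel;
  for (int i = 0; i < dimM; ++i) {
    for (int j = 0; j < dimN; ++j) {
      A[i * dimN + j] += scale * B[j / dim];
    }
  }
}

// B[c] += scale * sum over all rows of the dim columns that share bias c,
// i.e. the reduction KeMatrixCollectSharedBias performs on the GPU.
inline void collectSharedBiasRef(float* B, const float* A, int channel,
                                 int dimM, int dimN, float scale) {
  const int dim = dimN / channel;
  for (int c = 0; c < channel; ++c) {
    float sum = 0.0f;
    for (int i = 0; i < dimM; ++i) {
      for (int j = 0; j < dim; ++j) {
        sum += A[i * dimN + c * dim + j];
      }
    }
    B[c] += scale * sum;
  }
}

On the GPU, the collect kernel chooses between one thread per channel (when dim is below the limit of 64) and a per-channel block reduction via simpleReduce, so it matches this sketch up to floating-point summation order.
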
diff --git a/paddle/gserver/layers/ConcatenateLayer.cpp b/paddle/gserver/layers/ConcatenateLayer.cpp index 52a7cb6f777c3..bb6709b8df330 100644 --- a/paddle/gserver/layers/ConcatenateLayer.cpp +++ b/paddle/gserver/layers/ConcatenateLayer.cpp @@ -97,7 +97,8 @@ void ConcatenateLayer::backward(const UpdateCallback& callback) { */ class ConcatenateLayer2 : public Layer { public: - explicit ConcatenateLayer2(const LayerConfig& config) : Layer(config) {} + explicit ConcatenateLayer2(const LayerConfig& config) : + Layer(config) {} ~ConcatenateLayer2() {} @@ -110,6 +111,8 @@ class ConcatenateLayer2 : public Layer { std::vector> projections_; std::vector projOutput_; std::vector> projCol_; + bool sharedBias_; + std::unique_ptr biases_; }; REGISTER_LAYER(concat2, ConcatenateLayer2); @@ -119,7 +122,6 @@ bool ConcatenateLayer2::init(const LayerMap& layerMap, /* Initialize the basic parent class */ if (!Layer::init(layerMap, parameterMap)) return false; - CHECK(!biasParameter_); CHECK_EQ(inputLayers_.size(), parameters_.size()); projections_.reserve(inputLayers_.size()); projCol_.reserve(inputLayers_.size()); @@ -137,6 +139,13 @@ bool ConcatenateLayer2::init(const LayerMap& layerMap, } CHECK_EQ(getSize(), endCol); + /* initialize biases_ */ + if (biasParameter_.get() != NULL) { + sharedBias_ = config_.shared_biases(); + size_t psize = config_.bias_size(); + biases_ = std::unique_ptr(new Weight(1, psize, biasParameter_)); + } + return true; } @@ -154,8 +163,17 @@ void ConcatenateLayer2::forward(PassType passType) { projOutput_[i].grad = output_.grad->subColMatrix(startCol, endCol); } - for (size_t i = 0; i != inputLayers_.size(); ++i) { - projections_[i]->forward(&getInput(i), &projOutput_[i], passType); + { + AsyncGpuBlock block; + for (size_t i = 0; i != inputLayers_.size(); ++i) { + projections_[i]->forward(&getInput(i), &projOutput_[i], passType); + } + } + + /* add the bias-vector */ + if (biases_) { + REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str()); + output_.value->addBias(*(biases_->getW()), 1, sharedBias_); } /* activation */ { @@ -170,6 +188,13 @@ void ConcatenateLayer2::backward(const UpdateCallback& callback) { backwardActivation(); } + AsyncGpuBlock block; + if (biases_ && biases_->getWGrad()) { + REGISTER_TIMER_INFO("Concat2BpBiasTimer", getName().c_str()); + biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBias_); + biases_->getParameterPtr()->incUpdate(callback); + } + for (size_t i = 0; i != inputLayers_.size(); ++i) { if (projections_[i]) { projections_[i]->backward(callback); diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 9ed9572139dc8..040510b7ad211 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -35,25 +35,12 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, filterSizeY_.push_back(conf.filter_size_y()); filterPixels_.push_back(filterSize_.back() * filterSizeY_.back()); channels_.push_back(conf.channels()); - imgSize_.push_back(conf.img_size()); - imgPixels_.push_back(imgSize_.back() * imgSize_.back()); + imgSizeH_.push_back(conf.img_size()); + imgSizeW_.push_back(conf.img_size()); groups_.push_back(conf.groups()); filterChannels_.push_back(conf.filter_channels()); - outputX_.push_back(conf.output_x()); - outputs_.push_back(outputX_.back() * outputX_.back()); - } - - /* initialize the weightList */ - CHECK(inputLayers_.size() == parameters_.size()); - for (size_t i = 0; i < inputLayers_.size(); i++) { - size_t height, width; - height = filterPixels_[i] * 
filterChannels_[i]; - width = numFilters_; - - // create a new weight - CHECK_EQ(parameters_[i]->getSize(), width * height); - Weight* w = new Weight(height, width, parameters_[i]); - weights_.emplace_back(w); + outputH_.push_back(conf.output_x()); + outputW_.push_back(conf.output_x()); } /* initialize the biases_ */ @@ -74,4 +61,34 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, return true; } +size_t ConvBaseLayer::calOutputSize() { + auto clearAndReserve = [this](IntV* vec) { + vec->clear(); + vec->reserve(this->inputLayers_.size()); + }; + clearAndReserve(&imgSizeH_); + clearAndReserve(&imgSizeW_); + clearAndReserve(&outputH_); + clearAndReserve(&outputW_); + size_t layerSize = 0; + for (size_t i = 0; i < inputLayers_.size(); i++) { + imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); + imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); + if (imgSizeH_[i] == 0) + imgSizeH_[i] = config_.inputs(i).conv_conf().img_size(); + if (imgSizeW_[i] == 0) + imgSizeW_[i] = config_.inputs(i).conv_conf().img_size(); + outputH_.push_back( + outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + outputW_.push_back( + outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i])); + CHECK_EQ(outputH_[i], outputH_[0]); + CHECK_EQ(outputW_[i], outputW_[0]); + } + getOutput().setFrameHeight(outputH_[0]); + getOutput().setFrameWidth(outputW_[0]); + layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_); + return layerSize; +} + } // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index eaeaebf43be25..316514acf1a0d 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -43,19 +43,18 @@ class ConvBaseLayer : public Layer { IntV filterSizeY_; /// The spatial dimensions of the convolution input. IntV channels_; - /// The spatial dimensions of input feature map. - IntV imgSize_; - /// The total pixel size of input feature map. - /// imgPixels_ = imgSizeX_ * imgSizeY_. - IntV imgPixels_; + /// The spatial dimensions of input feature map height. + IntV imgSizeH_; + /// The spatial dimensions of input feature map width. + IntV imgSizeW_; /// filterPixels_ = filterSizeX_ * filterSizeY_. IntV filterPixels_; /// filterChannels_ = channels_/groups_. IntV filterChannels_; - /// The spatial dimensions of output feature map. - IntV outputX_; - /// The spatial dimensions of output feature map. - IntV outputs_; + /// The spatial dimensions of output feature map height. + IntV outputH_; + /// The spatial dimensions of output feature map width. + IntV outputW_; /// Group size, refer to grouped convolution in /// Alex Krizhevsky's paper: when group=2, the first half of the /// filters are only connected to the first half of the input channels, @@ -80,6 +79,13 @@ class ConvBaseLayer : public Layer { virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + /** + * imgSizeH_ and imgSizeW_ will be set according to the previous input layers + * in this function. Then it will calculate outputH_ and outputW_ and set them + * into output argument. + */ + virtual size_t calOutputSize(); + Weight& getWeight(int idx) { return *weights_[idx]; } /** diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/gserver/layers/ConvProjection.cpp new file mode 100644 index 0000000000000..d1ce53fe26351 --- /dev/null +++ b/paddle/gserver/layers/ConvProjection.cpp @@ -0,0 +1,210 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#include "paddle/utils/Stat.h" +#include "ConvProjection.h" + +namespace paddle { + +REGISTER_PROJECTION(conv, ConvProjection); + +ThreadLocalD> ConvProjection::convMem_; + +ConvProjection::ConvProjection(const ProjectionConfig& config, + ParameterPtr parameter, bool useGpu) + : Projection(config, parameter, useGpu) { + + CHECK(useGpu); // only support GPU + getConvParams(); + initCudnn(); + + size_t height = filterH_ * filterW_ * channels_ / groups_; + size_t width = numFilters_; + weight_.reset(new Weight(height, width, parameter)); + weightOffset_ = height * width / groups_; +} + +void ConvProjection::getConvParams() { + const ConvConfig &conf = config_.conv_conf(); + paddingH_ = conf.padding_y(); + paddingW_ = conf.padding(); + + strideH_ = conf.stride_y(); + strideW_ = conf.stride(); + + filterH_ = conf.filter_size_y(); + filterW_ = conf.filter_size(); + + configImgH_ = conf.img_size(); + configImgW_ = conf.img_size(); + + channels_ = conf.channels(); + numFilters_ = config_.num_filters(); + + groups_ = conf.groups(); + CHECK_EQ(channels_ % groups_, 0); + CHECK_EQ(numFilters_ % groups_, 0); +} + +void ConvProjection::initCudnn() { + hl_create_filter_descriptor(&filterDesc_, channels_, numFilters_, + filterH_, filterW_); + hl_create_tensor_descriptor(&inputDesc_); + hl_create_tensor_descriptor(&outputDesc_); + hl_create_convolution_descriptor(&convDesc_, inputDesc_, filterDesc_, + paddingH_, paddingW_, strideH_, strideW_); + + // initialize all to default algorithms + fwdAlgo_ = 0; + bwdFilterAlgo_ = 0; + bwdDataAlgo_ = 0; + fwdLimitBytes_ = 0; + bwdDataLimitBytes_ = 0; + bwdFilterLimitBytes_ = 0; + workSpaceInBytes_ = 0; + + batchNum_ = 0; + isSelectAlgo_ = false; +} + +void ConvProjection::reshapeTensorDesc(int batchSize) { + hl_tensor_reshape(inputDesc_, batchSize, channels_, imageH_, imageW_, + channels_ * imageH_ * imageW_, imageH_ * imageW_, + imageW_, 1); + hl_reset_convolution_descriptor(convDesc_, inputDesc_, filterDesc_, + paddingH_, paddingW_, strideH_, strideW_); + + // The stride between two consecutive images in ConvProjection may not be 1, + // for example, in the case of layer ConcatenateLayer2 with two + // ConvProjection, the stride is the output_size of layer ConcatenateLayer2. + // So the calculation of nStride is different from CudnnConvLayer. + // In fact, only "nStride = out_->value->getStride()" is ok. 
+ size_t nStride = numFilters_ * outputH_ * outputW_; + if (out_->value->isContiguous()) { + CHECK_EQ(nStride, out_->value->getWidth()); + } else { + nStride = out_->value->getStride(); + } + + hl_tensor_reshape(outputDesc_, batchSize, numFilters_, outputH_, outputW_, + nStride, outputH_ * outputW_, outputW_, 1); +} + +void ConvProjection::reshape(int batchSize) { + size_t width = calOutputSize(); + CHECK_EQ(width, out_->value->getWidth()); + + isSelectAlgo_ = (batchSize == batchNum_); + batchNum_ = batchSize; + + if (!isSelectAlgo_) { + reshapeTensorDesc(batchSize); + hl_conv_workspace(inputDesc_, outputDesc_, filterDesc_, + convDesc_, &fwdAlgo_, &fwdLimitBytes_, + &bwdDataAlgo_, &bwdDataLimitBytes_, + &bwdFilterAlgo_, &bwdFilterLimitBytes_); + + size_t maxWorkSpace = 0; + maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); + maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_); + workSpaceInBytes_ = maxWorkSpace; + + + VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_ + << " / " << bwdDataAlgo_ + << " / " << bwdFilterAlgo_; + } + + isSelectAlgo_ = true; +} + +void ConvProjection::forward() { + int batchSize = in_->value->getHeight(); + reshape(batchSize); + + void* workSpace = NULL; + if (workSpaceInBytes_ > 0) { + workSpace = getSpaceBytes(workSpaceInBytes_); + } + + for (int g = 0; g < groups_; ++g) { + REGISTER_TIMER_INFO("CudnnConvFwTimer", getName().c_str()); + + real *inputData = in_->value->getData() + g * inputOffset_; + real *wgtData = weight_->getW()->getData() + g * weightOffset_; + real *outData = out_->value->getData() + g * outputOffset_; + hl_convolution_forward(inputDesc_, inputData, outputDesc_, + outData, filterDesc_, wgtData, + convDesc_, workSpace, + fwdLimitBytes_, fwdAlgo_); + } +} + +void ConvProjection::backward(const UpdateCallback& callback) { + REGISTER_TIMER_INFO("CudnnConvBpTimer", getName().c_str()); + + void* workSpace = NULL; + if (workSpaceInBytes_ > 0) { + workSpace = getSpaceBytes(workSpaceInBytes_); + } + + for (int g = 0; g < groups_; ++g) { + real *outGrad = out_->grad->getData() + g * outputOffset_; + if (weight_->getWGrad()) { + real *inputData = in_->value->getData() + g * inputOffset_; + real *weightGrad = weight_->getWGrad()->getData() + g * weightOffset_; + hl_convolution_backward_filter( + inputDesc_, inputData, outputDesc_, outGrad, filterDesc_, + weightGrad, convDesc_, workSpace, bwdFilterLimitBytes_, + bwdFilterAlgo_); + } + + MatrixPtr preGrad = in_->grad; + if (NULL != preGrad) { + real *inputGrad = preGrad->getData() + g * inputOffset_; + real *wgtData = weight_->getW()->getData() + g* weightOffset_; + hl_convolution_backward_data( + inputDesc_, inputGrad, outputDesc_, outGrad, filterDesc_, + wgtData, convDesc_, workSpace, bwdDataLimitBytes_, + bwdDataAlgo_); + } + } + + weight_->getParameterPtr()->incUpdate(callback); +} + +void* ConvProjection::getSpaceBytes(size_t size) { + std::vector& convMem = *convMem_; + if (convMem.empty()) { + int numDevices = hl_get_device_count(); + convMem.resize(numDevices); + } + + int devId = hl_get_device(); + MemoryHandle** localMem = &(convMem[devId]); + if (NULL == *localMem || size > (*localMem)->getAllocSize()) { + *localMem = new GpuMemoryHandle(size); + } + return (*localMem)->getBuf(); +} + +ConvProjection::~ConvProjection() { + hl_destroy_tensor_descriptor(inputDesc_); + hl_destroy_tensor_descriptor(outputDesc_); + hl_destroy_filter_descriptor(filterDesc_); + hl_destroy_convolution_descriptor(convDesc_); +} + +} // namespace paddle diff --git 
a/paddle/gserver/layers/ConvProjection.h b/paddle/gserver/layers/ConvProjection.h new file mode 100644 index 0000000000000..41a100ac3c50f --- /dev/null +++ b/paddle/gserver/layers/ConvProjection.h @@ -0,0 +1,125 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#pragma once + +#include "Projection.h" + +namespace paddle { + +/** + * @brief Convolution projection do the same calculation with CudnnConvLayer. + */ +class ConvProjection : public Projection { +public: + /** + * Constructor. + */ + ConvProjection(const ProjectionConfig& config, ParameterPtr parameter, + bool useGpu); + + ~ConvProjection(); + + virtual void forward(); + virtual void backward(const UpdateCallback& callback); + +protected: + void getConvParams(); + void initCudnn(); + + void reshapeTensorDesc(int batchSize); + void reshape(int batchSize); + + int outputSize(int imageSize, int filterSize, int padding, int stride) { + return (imageSize - filterSize + 2 * padding) / stride + 1; + } + + size_t calOutputSize() { + imageH_ = in_->getFrameHeight(); + imageW_ = in_->getFrameWidth(); + if (imageH_ == 0) imageH_ = configImgH_; + if (imageW_ == 0) imageW_ = configImgW_; + outputH_ = outputSize(imageH_, filterH_, paddingH_, strideH_); + outputW_ = outputSize(imageW_, filterW_, paddingW_, strideW_); + + const_cast(out_)->setFrameHeight(outputH_); + const_cast(out_)->setFrameWidth(outputW_); + + inputOffset_ = (channels_ / groups_) * imageH_ * imageW_; + outputOffset_ = (numFilters_ / groups_) * outputH_ * outputW_; + return outputH_ * outputW_ * numFilters_; + } + + static void* getSpaceBytes(size_t size); + + /// imageH_ and imageW_ is calculated from the input layer. + int imageH_, imageW_; + /// configImgH_ and configImgW_ is obtained from config. + int configImgH_, configImgW_; + int outputH_, outputW_; + int channels_, numFilters_; + int paddingH_, paddingW_; + int strideH_, strideW_; + int filterH_, filterW_; + /// One group offset of input data. + int inputOffset_; + /// One group offset of output data. + int outputOffset_; + /// One group offset of weight. + int weightOffset_; + int groups_; + + /// Cudnn tensor descriptor for input. + hl_tensor_descriptor inputDesc_; + /// Cudnn tensor descriptor for output. + hl_tensor_descriptor outputDesc_; + /// Cudnn tensor descriptor for filter. + hl_filter_descriptor filterDesc_; + /// Cudnn tensor descriptor for a convolution operation. + hl_convolution_descriptor convDesc_; + + /// Record the algorithm for forward convolution, which is obtained by cudnn + /// api to search the best suited algorithm. + int fwdAlgo_; + /// Record the algorithm for computing convolution gradient with respect to + /// filter coefficients. + int bwdFilterAlgo_; + /// Record the algorithm for computing convolution gradient with respect to + /// the output. + int bwdDataAlgo_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// forward convolution with the specified algo. 
+ size_t fwdLimitBytes_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// backwardFilter with the specified algo. + size_t bwdDataLimitBytes_; + /// Amount of GPU memory needed as workspace to be able to execute a + /// backwardData with the specified algo. + size_t bwdFilterLimitBytes_; + /// Size of total work space. + size_t workSpaceInBytes_; + + /// Whether to call cuDNN api to choose conv algorithm. + bool isSelectAlgo_; + /// batchNum is used to record batch size. If the batch size is changed, + /// the selection algorithm will be called. + int batchNum_; + bool bias_; + + std::unique_ptr weight_; + static ThreadLocalD> convMem_; +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/CudnnConvLayer.cpp b/paddle/gserver/layers/CudnnConvLayer.cpp index 0f932f960f6ba..e77216f17c3be 100644 --- a/paddle/gserver/layers/CudnnConvLayer.cpp +++ b/paddle/gserver/layers/CudnnConvLayer.cpp @@ -22,215 +22,64 @@ REGISTER_LAYER(cudnn_conv, CudnnConvLayer); bool CudnnConvLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { - ConvBaseLayer::init(layerMap, parameterMap); + if (!ConvBaseLayer::init(layerMap, parameterMap)) return false; CHECK(useGpu_) << "CudnnConvLayer only support gpu"; - maxGroups_ = 0; - for (size_t i = 0; i < inputLayers_.size(); i++) { - CHECK_EQ(channels_[i] % groups_[i], 0); - CHECK_EQ(numFilters_ % groups_[i], 0); - - hl_filter_descriptor filter; - hl_create_filter_descriptor(&filter, channels_[i] / groups_[i], - numFilters_ / groups_[i], filterSizeY_[i], - filterSize_[i]); - filterDesc_.push_back(filter); - - hl_tensor_descriptor input; - hl_create_tensor_descriptor(&input); - inputDesc_.push_back(input); - - hl_tensor_descriptor output; - int outputX = - outputSize(imgSize_[i], filterSize_[i], padding_[i], stride_[i]); - CHECK_EQ(outputX, outputX_[i]); - hl_create_tensor_descriptor(&output); - outputDesc_.push_back(output); + CHECK_EQ(inputLayers_.size(), parameters_.size()); + projections_.reserve(inputLayers_.size()); + projConf_.reserve(inputLayers_.size()); - hl_convolution_descriptor conv; - hl_create_convolution_descriptor(&conv, input, filter, paddingY_[i], - padding_[i], strideY_[i], stride_[i]); - convDesc_.push_back(conv); - - weightOffset_.push_back((numFilters_ / groups_[i]) * - (channels_[i] / groups_[i]) * filterPixels_[i]); - inputOffset_.push_back((channels_[i] / groups_[i]) * imgSize_[i] * - imgSize_[i]); - outputOffset_.push_back((numFilters_ / groups_[i]) * outputX_[i] * - outputX_[i]); - - // initialize all to default algorithms - fwdAlgo_.push_back(0); - bwdFilterAlgo_.push_back(0); - bwdDataAlgo_.push_back(0); - fwdLimitBytes_.push_back(0); - bwdFilterLimitBytes_.push_back(0); - bwdDataLimitBytes_.push_back(0); - - // cudnn streams per group equal to 1 - if (groups_[i] > maxGroups_) { - maxGroups_ = groups_[i]; - } - } - - workSpaceInBytes_ = 0; - workSpaceData_ = NULL; - for (int i = 0; i < maxGroups_; ++i) { - workSpace_.push_back(NULL); + numFilters_ = config_.num_filters(); + CHECK(config_.shared_biases()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + ProjectionConfig* conf = new ProjectionConfig(); + conf->set_type("conv"); + conf->set_num_filters(numFilters_); + conf->set_allocated_conv_conf( + config_.mutable_inputs(i)->mutable_conv_conf()); + conf->set_input_size(getPrev(i)->getSize()); + conf->set_output_size(getSize()); + projConf_.emplace_back(conf); + projections_.emplace_back(Projection::create(*projConf_[i], + parameters_[i], useGpu_)); } if (biases_.get() && sharedBiases_) { 
hl_create_tensor_descriptor(&biasDesc_); + hl_create_tensor_descriptor(&outputDesc_); hl_tensor_reshape(biasDesc_, 1, numFilters_ / groups_[0], 1, 1); biasOffset_ = numFilters_ / groups_[0]; } - batchNum_ = 0; - isSelectAlgo_ = false; return true; } -void CudnnConvLayer::allocConvWorkSpace(size_t maxWorkSpace) { - size_t totalWorkSpace = maxWorkSpace * maxGroups_; - - if (totalWorkSpace > workSpaceInBytes_) { - if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpaceData_); - } - // total amount of storage needed over all groups - workSpaceData_ = hl_malloc_device(totalWorkSpace); - - // update work space address for each group - for (int i = 0; i < maxGroups_; ++i) { - workSpace_[i] = reinterpret_cast(workSpaceData_) - + i * maxWorkSpace; - } - workSpaceInBytes_ = totalWorkSpace; - } -} - -void CudnnConvLayer::reshape(int batchSize) { - CHECK_NE(inputLayers_.size(), 0UL); - imageH_ = inputLayers_[0]->getOutput().getFrameHeight(); - imageW_ = inputLayers_[0]->getOutput().getFrameWidth(); - if (imageH_ == 0) imageH_ = imgSize_[0]; - if (imageW_ == 0) imageW_ = imgSize_[0]; - - for (size_t i = 1; i < inputLayers_.size(); i++) { - int imageH = inputLayers_[i]->getOutput().getFrameHeight(); - int imageW = inputLayers_[i]->getOutput().getFrameWidth(); - if (imageH) { - CHECK_EQ(imageH_, imageH) << "Inputs must have same height."; - } - if (imageW) { - CHECK_EQ(imageW_, imageW) << "Inputs must have same width."; - } - } - - outputH_ = outputSize(imageH_, filterSizeY_[0], paddingY_[0], strideY_[0]); - outputW_ = outputSize(imageW_, filterSize_[0], padding_[0], stride_[0]); - // check outputH & outputW - getOutput().setFrameHeight(outputH_); - getOutput().setFrameWidth(outputW_); - - // if the batchSize remains the same, set isSelectAlgo_ true. - // Otherwise, set isSelectAlgo_ false and select algo again. 
- isSelectAlgo_ = (batchSize == batchNum_); - batchNum_ = batchSize; - - size_t maxWorkSpace = 0; - for (size_t i = 0; i < inputLayers_.size(); i++) { - CHECK_EQ(inputLayers_[i]->getOutput().value->getWidth(), - (size_t)(channels_[i] * imageH_ * imageW_)); - - hl_tensor_reshape(inputDesc_[i], batchSize, channels_[i] / groups_[i], - imageH_, imageW_, channels_[i] * imageH_ * imageW_, - imageH_ * imageW_, imageW_, 1); - - hl_tensor_reshape(outputDesc_[i], batchSize, numFilters_ / groups_[i], - outputH_, outputW_, numFilters_ * outputH_ * outputW_, - outputH_ * outputW_, outputW_, 1); - - hl_reset_convolution_descriptor(convDesc_[i], inputDesc_[i], - filterDesc_[i], paddingY_[i], - padding_[i], strideY_[i], stride_[i]); - - inputOffset_[i] = (channels_[i] / groups_[i]) * imageH_ * imageW_; - outputOffset_[i] = (numFilters_ / groups_[i]) * outputH_ * outputW_; - - if (!isSelectAlgo_) { - hl_conv_workspace(inputDesc_[i], outputDesc_[i], filterDesc_[i], - convDesc_[i], &fwdAlgo_[i], &fwdLimitBytes_[i], - &bwdDataAlgo_[i], &bwdDataLimitBytes_[i], - &bwdFilterAlgo_[i], &bwdFilterLimitBytes_[i]); - - maxWorkSpace = std::max(fwdLimitBytes_[i], bwdDataLimitBytes_[i]); - maxWorkSpace = std::max(maxWorkSpace, bwdFilterLimitBytes_[i]); - - VLOG(3) << getName() << " Fwd / BwdData / BwdFilter algo: " << fwdAlgo_[i] - << " / " << bwdDataAlgo_[i] - << " / " << bwdFilterAlgo_[i]; - } - } - - if (!isSelectAlgo_) { - allocConvWorkSpace(maxWorkSpace); - } - - isSelectAlgo_ = true; -} - void CudnnConvLayer::forward(PassType passType) { Layer::forward(passType); - int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); - reshape(batchSize); - resetOutput(batchSize, outputH_ * outputW_ * numFilters_); + + int batchSize = getInput(0).getBatchSize(); + resetOutput(batchSize, calOutputSize()); for (size_t i = 0; i != inputLayers_.size(); ++i) { - REGISTER_TIMER_INFO("CudnnConvFwTimer", getName().c_str()); - for (int g = 0; g < groups_[i]; ++g) { - real *inputData = getInputValue(i)->getData() + inputOffset_[i] * g; - real *wgtData = weights_[i]->getW()->getData() + weightOffset_[i] * g; - real *outData = getOutputValue()->getData() + outputOffset_[i] * g; - hl_convolution_forward(inputDesc_[i], inputData, outputDesc_[i], - outData, filterDesc_[i], wgtData, - convDesc_[i], workSpace_[g], - fwdLimitBytes_[i], fwdAlgo_[i]); - } + projections_[i]->forward(&getInput(i), &getOutput(), passType); } if (biases_) { REGISTER_TIMER_INFO("CudnnConvBiasTimer", getName().c_str()); - addBiases(); - } - - forwardActivation(); -} - -void CudnnConvLayer::addBiases() { - if (sharedBiases_) { + int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); + hl_tensor_reshape(outputDesc_, batchSize, numFilters_ / groups_[0], + outputH_[0], outputW_[0], numFilters_ * outputH_[0] * outputW_[0], + outputH_[0] * outputW_[0], outputW_[0], 1); + outputOffset_ = getOutputValue()->getWidth() / groups_[0]; for (int g = 0; g < groups_[0]; ++g) { real *biasData = biases_->getW()->getData() + biasOffset_ * g; - real *outData = getOutputValue()->getData() + outputOffset_[0] * g; + real *outData = getOutputValue()->getData() + outputOffset_ * g; hl_convolution_forward_add_bias(biasDesc_, biasData, - outputDesc_[0], outData); + outputDesc_, outData); } - } else { - LOG(FATAL) << "Not supported"; } -} -void CudnnConvLayer::bpropBiases() { - if (sharedBiases_) { - for (int g = 0; g < groups_[0]; ++g) { - real *biasGrad = biases_->getWGrad()->getData() + biasOffset_ * g; - real *outGrad = getOutputGrad()->getData() + outputOffset_[0] * g; - 
hl_convolution_backward_bias(biasDesc_, biasGrad, - outputDesc_[0], outGrad); - } - } else { - LOG(FATAL) << "Not supported"; - } + forwardActivation(); } void CudnnConvLayer::backward(const UpdateCallback &callback) { @@ -238,52 +87,23 @@ void CudnnConvLayer::backward(const UpdateCallback &callback) { if (biases_ && biases_->getWGrad()) { REGISTER_TIMER_INFO("CudnnConvBpBiasTimer", getName().c_str()); - bpropBiases(); + for (int g = 0; g < groups_[0]; ++g) { + real *biasGrad = biases_->getWGrad()->getData() + biasOffset_ * g; + real *outGrad = getOutputGrad()->getData() + outputOffset_ * g; + hl_convolution_backward_bias(biasDesc_, biasGrad, outputDesc_, outGrad); + } biases_->getParameterPtr()->incUpdate(callback); } for (size_t i = 0; i != inputLayers_.size(); ++i) { - REGISTER_TIMER_INFO("CudnnConvBpTimer", getName().c_str()); - for (int g = 0; g < groups_[i]; ++g) { - real *outGrad = getOutputGrad()->getData() + outputOffset_[i] * g; - if (weights_[i]->getWGrad()) { - real *inputData = getInputValue(i)->getData() + inputOffset_[i] * g; - real *weightGrad = - weights_[i]->getWGrad()->getData() + weightOffset_[i] * g; - hl_convolution_backward_filter( - inputDesc_[i], inputData, outputDesc_[i], outGrad, filterDesc_[i], - weightGrad, convDesc_[i], workSpace_[g], bwdFilterLimitBytes_[i], - bwdFilterAlgo_[i]); - } - - MatrixPtr preGrad = getInputGrad(i); - if (NULL != preGrad) { - real *inputGrad = preGrad->getData() + inputOffset_[i] * g; - real *wgtData = weights_[i]->getW()->getData() + weightOffset_[i] * g; - hl_convolution_backward_data( - inputDesc_[i], inputGrad, outputDesc_[i], outGrad, filterDesc_[i], - wgtData, convDesc_[i], workSpace_[g], bwdDataLimitBytes_[i], - bwdDataAlgo_[i]); - } - } - weights_[i]->getParameterPtr()->incUpdate(callback); + projections_[i]->backward(callback); } } CudnnConvLayer::~CudnnConvLayer() { - if (biasDesc_) { + if (biases_) { hl_destroy_tensor_descriptor(biasDesc_); - } - - for (size_t i = 0; i < inputDesc_.size(); i++) { - hl_destroy_tensor_descriptor(inputDesc_[i]); - hl_destroy_tensor_descriptor(outputDesc_[i]); - hl_destroy_filter_descriptor(filterDesc_[i]); - hl_destroy_convolution_descriptor(convDesc_[i]); - } - if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpaceData_); - workSpaceInBytes_ = 0; + hl_destroy_tensor_descriptor(outputDesc_); } } diff --git a/paddle/gserver/layers/CudnnConvLayer.h b/paddle/gserver/layers/CudnnConvLayer.h index a6dadba10daa4..6390d96315cc4 100644 --- a/paddle/gserver/layers/CudnnConvLayer.h +++ b/paddle/gserver/layers/CudnnConvLayer.h @@ -17,12 +17,13 @@ limitations under the License. */ #include "ConvBaseLayer.h" #include "paddle/math/Matrix.h" +#include "Projection.h" #include namespace paddle { /** - * @brief A subclass of ConvBaseLayer by cuDNN implementation. It only + * @brief A 2-dimension conv layer implemented by cuDNN. It only * supports GPU mode. We automatic select CudnnConvLayer for GPU * mode and ExpandConvLayer for CPU mode if you set type of "conv". * User also can specfiy type of "exconv" or "cudnn_conv" for @@ -31,81 +32,21 @@ namespace paddle { * The config file api is img_conv_layer. */ class CudnnConvLayer : public ConvBaseLayer { -private: - /// resize Cudnn workspace size - void allocConvWorkSpace(size_t maxWorkSpace); - protected: - int imageH_, imageW_, outputH_, outputW_; - /// Cudnn tensor descriptor for bias. + std::vector> projConf_; + std::vector> projections_; + hl_tensor_descriptor biasDesc_; - /// Cudnn tensor descriptor for input. 
- std::vector inputDesc_; - /// Cudnn tensor descriptor for output. - std::vector outputDesc_; - /// Cudnn tensor descriptor for filter. - std::vector filterDesc_; - /// Cudnn tensor descriptor for a convolution operation. - std::vector convDesc_; - /// One sample offset of input data. - IntV inputOffset_; - /// One sample offset of output data. - IntV outputOffset_; - /// One group offset of weight. - IntV weightOffset_; - /// One group offset of bias. + hl_tensor_descriptor outputDesc_; int biasOffset_; - - /// Save the algorithm for forward convolution, which is obtained by cudnn - /// api to search the best suited algorithm. - std::vector fwdAlgo_; - /// Save the algorithm for computing convolution gradient with respect to - /// filter coefficients. - std::vector bwdFilterAlgo_; - /// Save the algorithm for computing convolution gradient with respect to - /// the output. - std::vector bwdDataAlgo_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// forward convolution with the specified algo. - std::vector fwdLimitBytes_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// backwardFilter with the specified algo. - std::vector bwdFilterLimitBytes_; - /// Amount of GPU memory needed as workspace to be able to execute a - /// backwardData with the specified algo. - std::vector bwdDataLimitBytes_; - - /// Device work space address for each group. - std::vector workSpace_; - /// Max number of groups. - int maxGroups_; - /// Total work space address in device for all groups. - void* workSpaceData_; - /// Size of total work space. - size_t workSpaceInBytes_; - - /// Is or not select conv algorihtm. - bool isSelectAlgo_; - - /// batchNum is used to record batch size. If the batch size is changed, - /// the selection algorithm will be called. - int batchNum_; + int outputOffset_; public: explicit CudnnConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {} ~CudnnConvLayer(); - /** - * Intialization. Initialize member variables and create tenor descriptor. - */ bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - /** - * Reshape is done each forward. Reshape tensor decriptor - * inputDesc_, outputDesc_, convDesc_. And search the faster algo - * or the fastest algo within a given memeory limit. 
- */ - void reshape(int batchSize); void forward(PassType passType); void backward(const UpdateCallback& callback); void addBiases(); diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index df79c3e3037cf..80a6a62b5c0de 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -37,32 +37,29 @@ bool ExpandConvLayer::init(const LayerMap &layerMap, caffeMode_ = conf.caffe_mode(); } + /* initialize the weightList */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + size_t height, width; + height = filterPixels_[i] * filterChannels_[i]; + width = numFilters_; + + // create a new weight + CHECK_EQ(parameters_[i]->getSize(), width * height); + Weight* w = new Weight(height, width, parameters_[i]); + weights_.emplace_back(w); + } + return true; } -size_t ExpandConvLayer::getSize() { +size_t ExpandConvLayer::getOutputSize() { CHECK_NE(inputLayers_.size(), 0UL); - imgSizeH_.clear(); - imgSizeW_.clear(); - outputH_.clear(); - outputW_.clear(); + size_t layerSize = ConvBaseLayer::calOutputSize(); subN_.clear(); - size_t layerSize = 0; for (size_t i = 0; i < inputLayers_.size(); i++) { - imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); - imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); - if (imgSizeH_[i] == 0) imgSizeH_[i] = imgSize_[i]; - if (imgSizeW_[i] == 0) imgSizeW_[i] = imgSize_[i]; - outputH_.push_back( - outputSize(imgSizeH_[i], filterSize_[i], padding_[i], stride_[i])); - outputW_.push_back( - outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i])); subN_.push_back(outputH_[i] * outputW_[i]); - CHECK(layerSize == 0 || subN_[i] * size_t(numFilters_) == layerSize); - layerSize = subN_[i] * numFilters_; } - getOutput().setFrameHeight(outputH_[0]); - getOutput().setFrameWidth(outputW_[0]); return layerSize; } @@ -119,7 +116,7 @@ void ExpandConvLayer::expandFwdOnce(MatrixPtr image, int inIdx, int startIdx) { } void ExpandConvLayer::addSharedBias() { - size_t mapW = getSize() / numFilters_; + size_t mapW = getOutputValue()->getWidth() / numFilters_; size_t mapH = getOutputValue()->getElementCnt() / mapW; MatrixPtr out = Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); @@ -158,7 +155,7 @@ void ExpandConvLayer::forward(PassType passType) { * transOutValue correspond sample to one row */ int batchSize = inputLayers_[0]->getOutputValue()->getWidth(); batchSize = inputLayers_[0]->getOutputValue()->getHeight(); - resetOutput(batchSize, getSize()); + resetOutput(batchSize, getOutputSize()); MatrixPtr image = nullptr; for (size_t i = 0; i != inputLayers_.size(); ++i) { @@ -183,7 +180,7 @@ void ExpandConvLayer::forward(PassType passType) { } void ExpandConvLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { - size_t mapW = getSize() / numFilters_; + size_t mapW = v->getWidth() / numFilters_; size_t mapH = v->getElementCnt() / mapW; MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index fc3d69b1b7d14..030a3ba397ff4 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -37,14 +37,6 @@ class ExpandConvLayer : public ConvBaseLayer { IntV subN_; /// subK_ = channels_ * filterPixels_ * groups_. IntV subK_; - /// The spatial dimensions of height of input feature map. 
- IntV imgSizeH_; - /// The spatial dimensions of width of input feature map. - IntV imgSizeW_; - /// The spatial dimensions of height of output feature map. - IntV outputH_; - /// The spatial dimensions of width of output feature map. - IntV outputW_; /// Expand one sample at a time. shape: /// (numChannels * filterPixels_, outputSizeH * outputSizeW) MatrixPtr expandInput_; @@ -58,7 +50,7 @@ class ExpandConvLayer : public ConvBaseLayer { bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - size_t getSize(); + size_t getOutputSize(); /** * Create or resize expandInput_. diff --git a/paddle/gserver/layers/MixedLayer.cpp b/paddle/gserver/layers/MixedLayer.cpp index 054ddd3a228ed..26b1360290ffb 100644 --- a/paddle/gserver/layers/MixedLayer.cpp +++ b/paddle/gserver/layers/MixedLayer.cpp @@ -41,9 +41,13 @@ bool MixedLayer::init(const LayerMap& layerMap, } operators_.emplace_back(Operator::create(operator_conf, useGpu_)); } + /* initialize biases_ */ if (biasParameter_.get() != NULL) { - biases_ = std::unique_ptr(new Weight(1, getSize(), biasParameter_)); + sharedBias_ = config_.shared_biases(); + size_t psize = config_.bias_size(); + biases_ = std::unique_ptr( + new Weight(1, psize, biasParameter_)); } return true; @@ -119,12 +123,6 @@ void MixedLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); - /* add the bias-vector */ - if (biases_.get() != NULL) { - REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str()); - outV->addBias(*(biases_->getW()), 1); - } - for (size_t i = 0; i != inputLayers_.size(); ++i) { if (projections_[i]) { projections_[i]->forward(&getInput(i), &output_, passType); @@ -140,6 +138,12 @@ void MixedLayer::forward(PassType passType) { op->forward(ins, &output_, passType); } + /* add the bias-vector */ + if (biases_.get() != NULL) { + REGISTER_TIMER_INFO("FwBiasTimer", getName().c_str()); + outV->addBias(*(biases_->getW()), 1, sharedBias_); + } + /* activation */ { REGISTER_TIMER_INFO("FwAtvTimer", getName().c_str()); forwardActivation(); @@ -154,7 +158,7 @@ void MixedLayer::backward(const UpdateCallback& callback) { if (biases_ && biases_->getWGrad()) { REGISTER_TIMER_INFO("BpBiasTimer", getName().c_str()); - biases_->getWGrad()->collectBias(*getOutputGrad(), 1); + biases_->getWGrad()->collectBias(*getOutputGrad(), 1, sharedBias_); /* Increasing the number of gradient */ biases_->getParameterPtr()->incUpdate(callback); diff --git a/paddle/gserver/layers/MixedLayer.h b/paddle/gserver/layers/MixedLayer.h index 9bac1355bd21f..5842e51e1d79d 100644 --- a/paddle/gserver/layers/MixedLayer.h +++ b/paddle/gserver/layers/MixedLayer.h @@ -58,5 +58,6 @@ class MixedLayer : public Layer { /// the matrix size of projection state std::vector projectionStateMatrixSize_; std::unique_ptr biases_; + bool sharedBias_; }; } // namespace paddle diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/gserver/tests/LayerGradUtil.cpp index 552a6c5b41c7f..bc7bee0e4bbc8 100644 --- a/paddle/gserver/tests/LayerGradUtil.cpp +++ b/paddle/gserver/tests/LayerGradUtil.cpp @@ -669,12 +669,14 @@ void testLayerGrad(TestConfig testConf, string testLayerName, size_t batchSize, void testProjectionGrad(ProjectionConfig conf, InputType inputType, size_t parameterSize, size_t batchSize, bool useGpu, - bool testState) { + bool testState, int biasSize, bool sharedBias) { TestConfig config; conf.set_name(conf.type()); config.layerConfig.set_type("mixed"); config.layerConfig.set_size(conf.output_size()); - config.biasSize = config.layerConfig.size(); + config.biasSize = biasSize 
== 0 ? config.layerConfig.size() : biasSize; + config.layerConfig.set_bias_size(config.biasSize); + config.layerConfig.set_shared_biases(sharedBias); config.inputDefs.push_back( {inputType, "layer_0", conf.input_size(), parameterSize}); *config.layerConfig.add_inputs()->mutable_proj_conf() = conf; diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/gserver/tests/LayerGradUtil.h index 1e608dc0620ab..3b9ec803959b3 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/gserver/tests/LayerGradUtil.h @@ -217,7 +217,8 @@ void testLayerGrad(TestConfig testConf, string testLayerName, size_t batchSize, void testProjectionGrad(ProjectionConfig conf, InputType inputType, size_t parameterSize, size_t batchSize, bool useGpu, - bool testState = false); + bool testState = false, int biasSize = 0, + bool sharedBias = false); void testOperatorGrad(TestConfig& config, OperatorConfig& operatorConf, size_t batchSize, bool useGpu, bool testState = false); diff --git a/paddle/gserver/tests/img_conv_a.conf b/paddle/gserver/tests/img_conv_a.conf new file mode 100644 index 0000000000000..940589ed9ac24 --- /dev/null +++ b/paddle/gserver/tests/img_conv_a.conf @@ -0,0 +1,39 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from paddle.trainer_config_helpers import * + +settings(batch_size=10) +data = data_layer(name ="input", size=8*16*16) +conv1 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) +conv2 = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=False, + act=ReluActivation()) + +concat = concat_layer(input=[conv1, conv2]) + +conv = img_conv_layer(input=data, filter_size=1, filter_size_y=1, + num_channels=8, + num_filters=16, stride=1, + bias_attr=True, + act=LinearActivation()) + +outputs(concat, conv) diff --git a/paddle/gserver/tests/img_conv_b.conf b/paddle/gserver/tests/img_conv_b.conf new file mode 100644 index 0000000000000..8ca9c94541504 --- /dev/null +++ b/paddle/gserver/tests/img_conv_b.conf @@ -0,0 +1,32 @@ +#edit-mode: -*- python -*- +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from paddle.trainer_config_helpers import * + +settings(batch_size=10) +data = data_layer(name ="input", size=8*16*16) +proj1 = conv_projection(input=data, filter_size=1, filter_size_y=1, + num_channels=8, num_filters=16, stride=1) +proj2 = conv_projection(input=data, filter_size=1, filter_size_y=1, + num_channels=8, num_filters=16, stride=1) +concat = concat_layer(input=[proj1, proj2], bias_attr=False, act=ReluActivation()) + +proj = conv_projection(input=data, filter_size=1, filter_size_y=1, + num_channels=8, num_filters=16, stride=1) + +with mixed_layer(bias_attr=True, act=LinearActivation()) as conv: + conv += proj + +outputs(concat, conv) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index eab9bf84141a2..bf2c2e0499941 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -134,6 +134,45 @@ TEST(Projection, identity) { } } + +#ifndef PADDLE_ONLY_CPU +TEST(Projection, conv) { + const int NUM_FILTERS = 16; + const int FILTER_SIZE = 2; + const int FILTER_SIZE_Y = 3; + const int CHANNELS = 3; + const int IMAGE_SIZE = 16; + + ProjectionConfig conf; + conf.set_type("conv"); + conf.set_num_filters(NUM_FILTERS); + + ConvConfig* conv = conf.mutable_conv_conf(); + conv->set_filter_size(FILTER_SIZE); + conv->set_filter_size_y(FILTER_SIZE_Y); + conv->set_channels(CHANNELS); + conv->set_padding(0); + conv->set_padding_y(1); + conv->set_stride(2); + conv->set_stride_y(2); + conv->set_groups(1); + conv->set_filter_channels(conv->channels() / conv->groups()); + conv->set_img_size(IMAGE_SIZE); + int outputSize = (2 * conv->padding() + conv->img_size() - + conv->filter_size()) / conv->stride() + 1; + int outputSizeY = (2 * conv->padding_y() + conv->img_size() - + conv->filter_size_y()) / conv->stride_y() + 1; + conv->set_output_x(outputSize); + conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS); + conf.set_output_size(outputSize * outputSizeY * NUM_FILTERS); + + testProjectionGrad(conf, INPUT_DATA, + /* parameterSize */ NUM_FILTERS * CHANNELS * FILTER_SIZE * FILTER_SIZE_Y, + /* batchSize */ 100, true, false, NUM_FILTERS, true); +} +#endif + + TEST(Layer, concat) { TestConfig config; config.biasSize = 0; diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/gserver/tests/test_NetworkCompare.cpp index b3ef53067301b..8d3eac5aca8d1 100644 --- a/paddle/gserver/tests/test_NetworkCompare.cpp +++ b/paddle/gserver/tests/test_NetworkCompare.cpp @@ -236,6 +236,15 @@ TEST(Compare, img_pool) { compareNetwork(config_file_a, config_file_b); FLAGS_use_gpu = useGpu; } + +TEST(Compare, img_conv) { + std::string config_file_a = "./gserver/tests/img_conv_a.conf"; + std::string config_file_b = "./gserver/tests/img_conv_b.conf"; + bool useGpu = FLAGS_use_gpu; + FLAGS_use_gpu = true; + compareNetwork(config_file_a, config_file_b); + FLAGS_use_gpu = useGpu; +} #endif diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 843eabc97d642..aaeae98f0d28b 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -340,6 +340,15 @@ void GpuMatrix::addBias(Matrix& b, real scale) { BaseMatrix::addBias(b, scale); } +void GpuMatrix::addSharedBias(Matrix& b, real scale) { + CHECK(b.getHeight() == 1) << "the Bias should be a vector"; + CHECK_LE(b.getWidth(), getWidth()); + CHECK_EQ(getWidth() % b.getWidth(), 0UL); + hl_matrix_add_shared_bias(getData(), b.getData(), b.getWidth(), + getHeight(), getWidth(), scale); +} + + void GpuMatrix::collectBias(Matrix& a, real scale) { CHECK_EQ(getHeight(), 
(size_t)1); CHECK_EQ(width_, a.getWidth()); @@ -354,6 +363,14 @@ void GpuMatrix::collectBias(Matrix& a, real scale) { } } +void GpuMatrix::collectSharedBias(Matrix& a, real scale) { + CHECK_EQ(getHeight(), (size_t)1); + CHECK_EQ(a.getWidth() % getWidth(), 0UL); + hl_matrix_collect_shared_bias(getData(), a.getData(), getWidth(), + a.getHeight(), a.getWidth(), scale); +} + + void GpuMatrix::sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode) { @@ -1983,6 +2000,24 @@ void CpuMatrix::addBias(Matrix& b, real scale) { } } +void CpuMatrix::addSharedBias(Matrix& b, real scale) { + CHECK_EQ(b.getHeight(), (size_t)1); + real* aData = getData(); + real* bData = b.getData(); + size_t numSamples = getHeight(); + size_t channel = b.getWidth(); + CHECK_EQ(getWidth() % channel, 0UL); + size_t dim = getWidth() / channel; + + for (size_t i = 0; i < numSamples; i++) { + for (size_t c = 0; c < channel; c++) { + for (size_t j = 0; j < dim; j++) { + aData[i * getStride() + c * dim + j] += scale * bData[c]; + } + } + } +} + void CpuMatrix::collectBias(Matrix& a, real scale) { CHECK_EQ(getHeight(), (size_t)1); CHECK_EQ(width_, a.getWidth()); @@ -2000,6 +2035,23 @@ void CpuMatrix::collectBias(Matrix& a, real scale) { } } +void CpuMatrix::collectSharedBias(Matrix& a, real scale) { + CHECK_EQ(getHeight(), (size_t)1); + real* B = getData(); + real* A = a.getData(); + size_t numSamples = a.getHeight(); + size_t channel = getWidth(); + CHECK_EQ(a.getWidth() % channel, 0UL); + size_t dim = a.getWidth() / channel; + for (size_t i = 0; i < numSamples; i++) { + for (size_t c = 0; c < channel; c++) { + for (size_t j = 0; j < dim; j++) { + B[c] += scale * A[i * channel * dim + c * dim + j]; + } + } + } +} + void CpuMatrix::sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode) { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 047c76a8604cc..52cbed528ca8b 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -343,11 +343,35 @@ class Matrix : public BaseMatrix { LOG(FATAL) << "Not implemented"; } + virtual void addSharedBias(Matrix& b, real scale) { + LOG(FATAL) << "Not implemented"; + } + + virtual void addBias(Matrix& b, real scale, bool sharedBias) { + if (!sharedBias) { + addBias(b, scale); + } else { + addSharedBias(b, scale); + } + } + /// add each sample from a to this. virtual void collectBias(Matrix& a, real scale) { LOG(FATAL) << "Not implemented"; } + virtual void collectSharedBias(Matrix& a, real scale) { + LOG(FATAL) << "Not implemented"; + } + + virtual void collectBias(Matrix& a, real scale, bool sharedBias) { + if (!sharedBias) { + collectBias(a, scale); + } else { + collectSharedBias(a, scale); + } + } + virtual void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode) { LOG(FATAL) << "Not implemented"; @@ -1021,6 +1045,7 @@ class GpuMatrix : public Matrix { /// add b to each sample of this. void addBias(Matrix& b, real scale); + void addSharedBias(Matrix& b, real scale); /** * @code @@ -1028,6 +1053,7 @@ class GpuMatrix : public Matrix { * @endcode */ void collectBias(Matrix& a, real scale); + void collectSharedBias(Matrix& a, real scale); void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode); @@ -1341,9 +1367,11 @@ class CpuMatrix : public Matrix { public: /// add b to each sample of this. void addBias(Matrix& b, real scale); + void addSharedBias(Matrix& b, real scale); /// add each sample of a to this. 
void collectBias(Matrix& a, real scale); + void collectSharedBias(Matrix& a, real scale); void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode); diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index ac160479a9dfc..0ddf7e0dfc386 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -21,6 +21,8 @@ limitations under the License. */ #include "paddle/math/SparseMatrix.h" #include #include "paddle/gserver/tests/TestUtil.h" +#include "paddle/utils/Stat.h" + using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -2071,6 +2073,60 @@ TEST(Matrix, MaxOutFwdBwd) { } } +void testAddSharedBias(int numSamples, int dim, int channel) { + MatrixPtr cpuData = std::make_shared(numSamples, dim); + MatrixPtr gpuData = std::make_shared(numSamples, dim); + + MatrixPtr cpuBias = std::make_shared(1, channel); + MatrixPtr gpuBias = std::make_shared(1, channel); + + cpuData->randomizeUniform(); + gpuData->copyFrom(*cpuData); + cpuBias->randomizeUniform(); + gpuBias->copyFrom(*cpuBias); + + cpuData->addSharedBias(*cpuBias, 1.0); + gpuData->addSharedBias(*gpuBias, 1.0); + + MatrixPtr check = std::make_shared(numSamples, dim); + check->copyFrom(*gpuData); + MatrixCheckErr(*cpuData, *check); +} + +void testCollectSharedBias(int numSamples, int dim, int channel) { + MatrixPtr cpuData = std::make_shared(numSamples, dim); + MatrixPtr gpuData = std::make_shared(numSamples, dim); + + MatrixPtr cpuBias = std::make_shared(1, channel); + MatrixPtr gpuBias = std::make_shared(1, channel); + + cpuData->randomizeUniform(); + gpuData->copyFrom(*cpuData); + cpuBias->randomizeUniform(); + gpuBias->copyFrom(*cpuBias); + + cpuBias->collectSharedBias(*cpuData, 1.0); + gpuBias->collectSharedBias(*gpuData, 1.0); + + MatrixPtr check = std::make_shared(1, channel); + check->copyFrom(*gpuBias); + MatrixCheckErr(*cpuBias, *check); +} + + +TEST(Matrix, sharedBias) { + for (auto numSamples : {1, 100, 520}) { + for (auto dim : {100 * 16, 100 * 32}) { + for (auto channel : {8, 16}) { + VLOG(3) << " numSamples=" << numSamples << " dim=" << dim + << " channel=" << channel; + testAddSharedBias(numSamples, dim, channel); + testCollectSharedBias(numSamples, dim, channel); + } + } + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/trainer/CMakeLists.txt b/paddle/trainer/CMakeLists.txt index 08b411d2ccbae..06c019f0a9775 100644 --- a/paddle/trainer/CMakeLists.txt +++ b/paddle/trainer/CMakeLists.txt @@ -7,6 +7,7 @@ set(TRAINER_SOURCES Tester.cpp Trainer.cpp TrainerInternal.cpp + TrainerBenchmark.cpp ThreadParameterUpdater.cpp TrainerInternalConfig.cpp TrainerConfigHelper.cpp) diff --git a/paddle/trainer/Trainer.h b/paddle/trainer/Trainer.h index 4f4811a139e74..7762722456c44 100644 --- a/paddle/trainer/Trainer.h +++ b/paddle/trainer/Trainer.h @@ -99,6 +99,7 @@ class Trainer { void startTrainPass(); void finishTrainPass(); void trainOneDataBatch(DataBatch& dataBatch); + void time(); /** * given a dataBatch and the current parameter value diff --git a/paddle/trainer/TrainerBenchmark.cpp b/paddle/trainer/TrainerBenchmark.cpp new file mode 100644 index 0000000000000..54862e95b4a73 --- /dev/null +++ b/paddle/trainer/TrainerBenchmark.cpp @@ -0,0 +1,71 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#undef PADDLE_DISABLE_TIMER + +#include "Trainer.h" +#include "paddle/utils/Stat.h" +#include "paddle/utils/Util.h" + +P_DECLARE_int32(test_period); + +P_DEFINE_bool(feed_data, false, "Wether to read data from DataProvider."); + +namespace paddle { + +void Trainer::time() { + startTrain(); + + trainerInternal_.getParameterUpdater()->startPass(); + evaluator_->start(); + + DataBatch dataBatch; + int32_t batchSize = config_->getOptConfig().batch_size(); + int32_t num = dataProvider_->getNextBatch(batchSize, &dataBatch); + CHECK_EQ(num, batchSize) << "The sample number is less than batch size " + << num << " != " << batchSize; + + CHECK(dataBatch.getSize()) << "No data from data provider"; + + std::vector outputs; + // burning time + LOG(INFO) << "Burning time..."; + for (int n = 0; n < 10; ++n) { + trainerInternal_.trainOneBatch(n, dataBatch, &outputs); + } + LOG(INFO) << "Burning time end."; + + for (int n = 0; n < FLAGS_test_period; n++) { + if (FLAGS_feed_data) { + REGISTER_TIMER("GetData"); + num = dataProvider_->getNextBatch(batchSize, &dataBatch); + } + + if (num != batchSize) { + break; + } + + { + REGISTER_TIMER("FwdBwd"); + trainerInternal_.trainOneBatch(n, dataBatch, &outputs); + } + } + globalStat.setThreadInfo(true); + globalStat.printSegTimerStatus(); + globalStat.reset(); + + finishTrain(); +} + +} // namespace paddle diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/trainer/TrainerMain.cpp index 94266639f94ad..a486cc383ace6 100644 --- a/paddle/trainer/TrainerMain.cpp +++ b/paddle/trainer/TrainerMain.cpp @@ -103,6 +103,8 @@ int main(int argc, char** argv) { trainer.checkGradient(); } else if (FLAGS_job == "test") { trainer.test(); + } else if (FLAGS_job == "time") { + trainer.time(); } else { LOG(FATAL) << "Unknown job type: " << FLAGS_job; } diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 70c1f8d563238..79e76b6bf1bdd 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -255,7 +255,7 @@ sinclude(`ModelConfigLayer.proto.m4') // (which is how convnets are usually trained). Setting this to // false will untie the biases, yielding a separate bias for // every location at which the filter is applied. - optional bool shared_biases = 8; + optional bool shared_biases = 8 [default = false]; // Valid values are ones that divide the area of the output // grid in this convolutional layer. For example if this layer @@ -379,6 +379,9 @@ sinclude(`ModelConfigLayer.proto.m4') // use to compute moving mean and variance. 
optional real moving_average_fraction = 47 [default = 0.9]; + + // bias size + optional uint32 bias_size = 48 [default = 0]; } message EvaluatorConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index fe8a5e5d48767..e9098943165fd 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -632,6 +632,44 @@ def calc_parameter_dims(self, input_size, output_size): _total_pad = 0 +@config_class +class ConvProjection(Projection): + type = 'conv' + + def __init__( + self, + input_layer_name, + num_filters=None, + conv_conf=None, + **xargs): + super(ConvProjection, self).__init__(input_layer_name, **xargs) + + if num_filters is not None: + self.proj_conf.num_filters = num_filters + + parse_conv(conv_conf, + input_layer_name, + self.proj_conf.conv_conf) + # TODO: support rectangle input + self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x ** 2) * num_filters + + def calc_output_size(self, input_layer_config): + return self.proj_conf.output_size + + def calc_parameter_size(self, input_size, output_size): + co = self.proj_conf.num_filters + ci = self.proj_conf.conv_conf.channels + fh = self.proj_conf.conv_conf.filter_size + fw = self.proj_conf.conv_conf.filter_size_y + return co * ci * fh * fw + + def calc_bias_size(self): + return self.proj_conf.num_filters + + def calc_parameter_dims(self, input_size, output_size): + return None + + # Define a operator for mixed layer @config_class class Operator(Cfg): @@ -2528,8 +2566,15 @@ def __init__( record_operator_conf = self.config.operator_confs.add() record_operator_conf.CopyFrom(operator_conf) + psize = self.config.size + if isinstance(self.inputs[0], ConvProjection): + self.config.shared_biases = True + psize = 0 + for input in self.inputs: + psize += input.calc_bias_size() - self.create_bias_parameter(bias, self.config.size) + self.config.bias_size = psize + self.create_bias_parameter(bias, psize) if error_clipping_threshold is not None: self.config.error_clipping_threshold = error_clipping_threshold @@ -2547,8 +2592,10 @@ def __init__( self, name, inputs, + bias=False, **xargs): config_assert(inputs, 'inputs cannot be empty') + config_assert(not bias, 'ConcatenateLayer cannot support bias.') super(ConcatenateLayer, self).__init__( name, 'concat', 0, inputs=inputs, **xargs) size = 0 @@ -2567,10 +2614,19 @@ def __init__( self, name, inputs, + bias=False, **xargs): config_assert(inputs, 'inputs cannot be empty') super(ConcatenateLayer2, self).__init__( name, 'concat2', 0, inputs=inputs, **xargs) + + if isinstance(self.inputs[0], ConvProjection): + for input_index in xrange(len(self.inputs) - 1): + input = self.inputs[input_index + 1] + config_assert(isinstance(input, ConvProjection), + "The first input of ConcatenateLayer2 is ConvProjection, " + "the other inputs should also be ConvProjection.") + size = 0 for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) @@ -2596,6 +2652,16 @@ def __init__( input.proj_conf.output_size) self.create_input_parameter(input_index, psize, dims) + psize = self.config.size + if isinstance(self.inputs[0], ConvProjection): + self.config.shared_biases = True + psize = 0 + for input in self.inputs: + psize += input.calc_bias_size() + + self.config.bias_size = psize + self.create_bias_parameter(bias, psize) + @config_layer('recurrent') class RecurrentLayer(LayerBase): def __init__( diff --git a/python/paddle/trainer_config_helpers/layers.py 
b/python/paddle/trainer_config_helpers/layers.py
index 7df9108ae82a4..9a23c02431d18 100644
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -34,7 +34,7 @@
     "table_projection", "mixed_layer", "data_layer", "embedding_layer",
     "fc_layer", "grumemory", "pooling_layer", "lstmemory", "last_seq",
     "first_seq",
-    "cos_sim", "hsigmoid",
+    "cos_sim", "hsigmoid", "conv_projection",
     "regression_cost", 'classification_cost', "LayerOutput",
     'img_conv_layer', 'img_pool_layer', 'batch_norm_layer',
     'img_cmrnorm_layer', 'addto_layer',
@@ -1984,7 +1984,7 @@ def addto_layer(input, act=None, name=None, bias_attr=None,
 @wrap_act_default(act=IdentityActivation())
 @wrap_name_default("concat")
 @layer_support()
-def concat_layer(input, act=None, name=None, layer_attr=None):
+def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
     """
     Concat all input vector into one huge vector.
     Inputs can be list of LayerOutput or list of projection.
@@ -2043,10 +2043,14 @@ def __reduce_concat_type__(a, b):
     layer_type = (LayerType.CONCAT_LAYER if is_concat_layer
                   else LayerType.CONCAT_PROJ_LAYER)
 
+    if layer_type == LayerType.CONCAT_LAYER:
+        assert not bias_attr
+
     Layer(
         name=name, type=layer_type,
         inputs=[x.name for x in input] if is_concat_layer else input,
         active_type=act.name,
+        bias=ParamAttr.to_bias(bias_attr),
         **ExtraLayerAttribute.to_kwargs(layer_attr)
     )
 
@@ -2950,6 +2954,103 @@ def conv_operator(img, filter, filter_size, num_filters,
     op.origin = [img, filter]
     return op
 
+@wrap_param_attr_default()
+def conv_projection(input, filter_size, num_filters,
+                    num_channels=None, stride=1, padding=0,
+                    filter_size_y=None, stride_y=None, padding_y=None,
+                    groups=1, param_attr=None):
+    """
+    ConvProjection with a layer as input.
+    It performs a convolution over the input with the filter weights.
+
+    Different from img_conv_layer and conv_operator, conv_projection is a
+    Projection, which can be used in mixed_layer and concat_layer. It uses
+    cudnn to implement the convolution and only supports GPU mode.
+
+    The example usage is:
+
+    .. code-block:: python
+
+       proj = conv_projection(input=input1,
+                              filter_size=3,
+                              num_filters=64,
+                              num_channels=64)
+
+    :param input: input layer
+    :type input: LayerOutput
+    :param filter_size: The x dimension of a filter kernel.
+    :type filter_size: int
+    :param filter_size_y: The y dimension of a filter kernel. Since
+                          PaddlePaddle now supports rectangular filters,
+                          the filter's shape can be (filter_size, filter_size_y).
+    :type filter_size_y: int
+    :param num_filters: channel of output data.
+    :type num_filters: int
+    :param num_channels: channel of input data.
+    :type num_channels: int
+    :param stride: The x dimension of the stride.
+    :type stride: int
+    :param stride_y: The y dimension of the stride.
+    :type stride_y: int
+    :param padding: The x dimension of padding.
+    :type padding: int
+    :param padding_y: The y dimension of padding.
+    :type padding_y: int
+    :param groups: The group number.
+    :type groups: int
+    :param param_attr: Convolution param attribute. None means default attribute.
+    :type param_attr: ParameterAttribute
+    :return: A ConvProjection Object.
+    :rtype: ConvProjection
+    """
+    if num_channels is None:
+        assert input.num_filters is not None
+        num_channels = input.num_filters
+
+    if filter_size_y is None:
+        if isinstance(filter_size, collections.Sequence):
+            assert len(filter_size) == 2
+            filter_size, filter_size_y = filter_size
+        else:
+            filter_size_y = filter_size
+
+    if stride_y is None:
+        if isinstance(stride, collections.Sequence):
+            assert len(stride) == 2
+            stride, stride_y = stride
+        else:
+            stride_y = stride
+
+    if padding_y is None:
+        if isinstance(padding, collections.Sequence):
+            assert len(padding) == 2
+            padding, padding_y = padding
+        else:
+            padding_y = padding
+
+    if param_attr.attr.get('initial_smart'):
+        # special initial for conv layers.
+        init_w = (2.0 / (filter_size ** 2 * num_channels)) ** 0.5
+        param_attr.attr["initial_mean"] = 0.0
+        param_attr.attr["initial_std"] = init_w
+        param_attr.attr["initial_strategy"] = 0
+        param_attr.attr["initial_smart"] = False
+
+    proj = ConvProjection(input_layer_name=input.name,
+                          num_filters=num_filters,
+                          conv_conf=Conv(filter_size=filter_size,
+                                         padding=padding,
+                                         stride=stride,
+                                         channels=num_channels,
+                                         filter_size_y=filter_size_y,
+                                         padding_y=padding_y,
+                                         stride_y=stride_y,
+                                         groups=groups),
+                          **param_attr.attr)
+
+    proj.origin = input
+    return proj
+
+
 @wrap_name_default()
 @layer_support()
diff --git a/python/paddle/trainer_config_helpers/networks.py b/python/paddle/trainer_config_helpers/networks.py
index 65512b327cdc6..bce88f93626ec 100644
--- a/python/paddle/trainer_config_helpers/networks.py
+++ b/python/paddle/trainer_config_helpers/networks.py
@@ -29,7 +29,7 @@
     "img_conv_bn_pool", 'dropout_layer', 'lstmemory_group', 'lstmemory_unit',
     'small_vgg', 'img_conv_group', 'vgg_16_network', 'gru_unit', 'gru_group',
     'simple_gru', 'simple_attention',
-    'text_conv_pool',
+    'simple_gru2', 'bidirectional_gru', 'text_conv_pool',
     'bidirectional_lstm', 'inputs', 'outputs']
 
@@ -811,22 +811,37 @@ def simple_gru(input,
                gru_layer_attr=None
                ):
     """
-    simple_gru is also a recurrent layer group version Gated Recurrent Unit as
-    gru_group. The difference only lies in implemention details.
+    You may see gru_step_layer, grumemory in layers.py, gru_unit, gru_group,
+    simple_gru in network.py. The reason why there are so many interfaces is
+    that we have two ways to implement recurrent neural networks. One way is to
+    use a complete layer to implement the rnn (including simple rnn, gru and
+    lstm) with multiple time steps, such as recurrent_layer, lstmemory,
+    grumemory. But the multiplication operation :math:`W x_t` is not computed
+    in these layers. See details in their interfaces in layers.py.
+    The other implementation is to use a recurrent group, which can assemble a
+    series of layers to compute the rnn step by step. This way is flexible for
+    attention mechanisms or other complex connections.
+
+    - gru_step_layer: only computes the rnn by one step. It needs a memory as
+      input and can be used in a recurrent group.
+    - gru_unit: a wrapper of gru_step_layer with memory.
+    - gru_group: a GRU cell implemented by a combination of multiple layers in
+      a recurrent group.
+      But :math:`W x_t` is not done in the group.
+    - grumemory: a GRU cell implemented by one layer, which does the same
+      calculation as gru_group and is faster than gru_group.
+    - simple_gru: a complete GRU implementation including :math:`W x_t` and
+      gru_group. :math:`W` contains :math:`W_r`, :math:`W_z` and :math:`W`, see
+      formula in grumemory.
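+
+    A minimal sketch of the two routes (illustrative only; it assumes an
+    embedding layer ``emb`` is already defined and the sizes are arbitrary):
+
+    .. code-block:: python
+
+        # One-layer route: grumemory expects its input to already be the
+        # projection W x_t, i.e. a layer of size 3 * size.
+        with mixed_layer(size=256 * 3) as gru_input:
+            gru_input += full_matrix_projection(input=emb)
+        gru_a = grumemory(input=gru_input, size=256)
+
+        # Recurrent-group route: simple_gru bundles the W x_t projection
+        # and gru_group into a single call.
+        gru_b = simple_gru(input=emb, size=256)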
+    In terms of computational speed, grumemory is relatively better than
+    gru_group, and gru_group is relatively better than simple_gru.
+
-    simple_gru does exactly the same calculation as the grumemory layer does.
-    Please see grumemory in layers.py for more detail about the maths.
-
     The example usage is:
 
     .. code-block:: python
 
-        gru = gur_group(input=[layer1],
-                        size=256,
-                        act=TanhActivation(),
-                        gate_act=SigmoidActivation())
+        gru = simple_gru(input=[layer1], size=256)
 
     :param input: input layer name.
     :type input: LayerOutput
@@ -863,6 +878,132 @@ def simple_gru(input,
                gru_layer_attr=gru_layer_attr)
 
+@wrap_name_default('simple_gru2')
+def simple_gru2(input,
+                size,
+                name=None,
+                reverse=False,
+                mixed_param_attr=None,
+                mixed_bias_attr=None,
+                gru_param_attr=None,
+                gru_bias_attr=None,
+                act=None,
+                gate_act=None,
+                mixed_layer_attr=None,
+                gru_cell_attr=None
+                ):
+    """
+    simple_gru2 is the same as simple_gru, but it uses grumemory instead.
+    Please see grumemory in layers.py for more detail about the maths.
+    simple_gru2 is faster than simple_gru.
+
+    The example usage is:
+
+    .. code-block:: python
+
+        gru = simple_gru2(input=[layer1], size=256)
+
+    :param input: input layer name.
+    :type input: LayerOutput
+    :param name: name of the gru group.
+    :type name: basestring
+    :param size: hidden size of the gru.
+    :type size: int
+    :param reverse: whether to process the input data in a reverse order
+    :type reverse: bool
+    :param act: type of the activation
+    :type act: BaseActivation
+    :param gate_act: type of the gate activation
+    :type gate_act: BaseActivation
+    :param gru_bias_attr: bias. False means no bias, None means default bias.
+    :type gru_bias_attr: ParameterAttribute|False
+    :param gru_layer_attr: Extra parameter attribute of the gru layer.
+    :type gru_layer_attr: ParameterAttribute|False
+    :return: the gru group.
+    :rtype: LayerOutput
+    """
+    with mixed_layer(name='%s_transform' % name,
+                     size=size * 3,
+                     bias_attr=mixed_bias_attr,
+                     layer_attr=mixed_layer_attr) as m:
+        m += full_matrix_projection(input=input, param_attr=mixed_param_attr)
+
+    return grumemory(name=name,
+                     size=size,
+                     input=m,
+                     reverse=reverse,
+                     bias_attr=gru_bias_attr,
+                     param_attr=gru_param_attr,
+                     act=act,
+                     gate_act=gate_act,
+                     layer_attr=gru_cell_attr)
+
+
+@wrap_name_default("bidirectional_gru")
+def bidirectional_gru(input, size, name=None, return_seq=False,
+                      fwd_mixed_param_attr=None, fwd_mixed_bias_attr=None,
+                      fwd_gru_param_attr=None, fwd_gru_bias_attr=None,
+                      fwd_act=None, fwd_gate_act=None,
+                      fwd_mixed_layer_attr=None, fwd_gru_cell_attr=None,
+
+                      bwd_mixed_param_attr=None, bwd_mixed_bias_attr=None,
+                      bwd_gru_param_attr=None, bwd_gru_bias_attr=None,
+                      bwd_act=None, bwd_gate_act=None,
+                      bwd_mixed_layer_attr=None, bwd_gru_cell_attr=None,
+
+                      last_seq_attr=None, first_seq_attr=None,
+                      concat_attr=None, concat_act=None):
+    """
+    A bidirectional_gru is a recurrent unit that iterates over the input
+    sequence both in forward and backward orders, and then concatenates the
+    two outputs to form a final output. However, concatenation of the two
+    outputs is not the only way to form the final output; you can also, for
+    example, just add them together.
+
+    The example usage is:
+
+    .. code-block:: python
+
+        bi_gru = bidirectional_gru(input=[input1], size=512)
+
+    :param name: bidirectional gru layer name.
+    :type name: basestring
+    :param input: input layer.
+    :type input: LayerOutput
+    :param size: gru layer size.
+ :type size: int + :param return_seq: If set False, outputs of the last time step are + concatenated and returned. + If set True, the entire output sequences that are + processed in forward and backward directions are + concatenated and returned. + :type return_seq: bool + :return: LayerOutput object. + :rtype: LayerOutput + """ + args = locals() + + fw = simple_gru2(name='%s_fw' % name, input=input, size=size, + **dict((k[len('fwd_'):], v) for k, v in args.iteritems() + if k.startswith('fwd_'))) + + bw = simple_gru2(name="%s_bw" % name, input=input, size=size, + reverse=True, + **dict((k[len('bwd_'):], v) for k, v in args.iteritems() + if k.startswith('bwd_'))) + + if return_seq: + return concat_layer(name=name, input=[fw, bw], layer_attr=concat_attr, + act=concat_act) + else: + fw_seq = last_seq(name="%s_fw_last" % name, input=fw, + layer_attr=last_seq_attr) + bw_seq = first_seq(name="%s_bw_last" % name, input=bw, + layer_attr=first_seq_attr) + return concat_layer(name=name, input=[fw_seq, bw_seq], + layer_attr=concat_attr, act=concat_act) + + @wrap_name_default("bidirectional_lstm") def bidirectional_lstm(input, size, name=None, return_seq=False, fwd_mat_param_attr=None, fwd_bias_param_attr=None, @@ -893,7 +1034,7 @@ def bidirectional_lstm(input, size, name=None, return_seq=False, .. code-block:: python - lstm_step = bidirectional_lstm(input=[input1], size=512) + bi_lstm = bidirectional_lstm(input=[input1], size=512) :param name: bidirectional lstm layer name. :type name: basestring @@ -907,7 +1048,7 @@ def bidirectional_lstm(input, size, name=None, return_seq=False, processed in forward and backward directions are concatenated and returned. :type return_seq: bool - :return: lstm layer name. + :return: LayerOutput object accroding to the return_seq. 
:rtype: LayerOutput """ args = locals() diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index d1b22b34903df..72dfdad7bdd40 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -1,10 +1,11 @@ 86c0815275a9d5eb902e23c6a592f58a img_layers.protostr a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 9c038249ec8ff719753a746cdb04c026 layer_activations.protostr -5913f87b39cee3b2701fa158270aca26 projections.protostr +34e04043cbb12931c47fa44ec50eeffc projections.protostr 7334ba0a4544f0623231330fc51d390d shared_fc.protostr -8b8b6bb128a7dfcc937be86145f53e2f shared_lstm.protostr +bb8e233b05b8e07f9ed386b7aee4f2c6 shared_lstm.protostr 6b39e34beea8dfb782bee9bd3dea9eb5 simple_rnn_layers.protostr +f98e79e1630d5eb827c300e64836d269 test_bi_grumemory.protostr 0fc1409600f1a3301da994ab9d28b0bf test_cost_layers.protostr 6cd5f28a3416344f20120698470e0a4c test_cost_layers_with_weight.protostr 144bc6d3a509de74115fa623741797ed test_expand_layer.protostr @@ -15,7 +16,7 @@ d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr 5433ed33d4e7414eaf658f2a55946186 test_maxout.protostr 251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr -2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr +fded24727338fb8ce44d9951ed8aea08 test_rnn_group.protostr 67d6fde3afb54f389d0ce4ff14726fe1 test_sequence_pooling.protostr f586a548ef4350ba1ed47a81859a64cb unused_layers.protostr -8122477f4f65244580cec09edc590041 util_layers.protostr +f937a5a6e7e8864b4d8cf56b0f7c7f44 util_layers.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 4b1d2d3d41d52..6a31ceabdf36d 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -9,7 +9,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -test_maxout) +test_maxout test_bi_grumemory) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py new file mode 100644 index 0000000000000..ab9f7c4948b85 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bi_grumemory.py @@ -0,0 +1,10 @@ +from paddle.trainer_config_helpers import * + +settings( + batch_size=1000, + learning_rate=1e-4 +) + +din = data_layer(name='data', size=120) + +outputs(bidirectional_gru(input=din, size=40, return_seq=True)) From 7180b4246d085913390e4a338bc3f1decf77c44d Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 1 Nov 2016 23:55:46 -0500 Subject: [PATCH 061/180] Add some concepts documents to guide user for using paddle (#249) --- demo/quick_start/dataprovider_emb.py | 1 + demo/quick_start/trainer_config.lstm.py | 15 +- doc_cn/concepts/nn.rst | 3 + doc_cn/concepts/program_concepts.rst | 4 + doc_cn/concepts/pserver_topology.dot | 68 +++++++++ doc_cn/concepts/trainer_config.py | 23 +++ doc_cn/concepts/use_concepts.rst | 191 ++++++++++++++++++++++++ doc_cn/faq/index.rst | 10 ++ doc_cn/index.rst | 
1 + 9 files changed, 305 insertions(+), 11 deletions(-) create mode 100644 doc_cn/concepts/nn.rst create mode 100644 doc_cn/concepts/program_concepts.rst create mode 100644 doc_cn/concepts/pserver_topology.dot create mode 100644 doc_cn/concepts/trainer_config.py create mode 100644 doc_cn/concepts/use_concepts.rst diff --git a/demo/quick_start/dataprovider_emb.py b/demo/quick_start/dataprovider_emb.py index ca940a89e5477..f5632d5f3f8bd 100755 --- a/demo/quick_start/dataprovider_emb.py +++ b/demo/quick_start/dataprovider_emb.py @@ -16,6 +16,7 @@ UNK_IDX = 0 + def initializer(settings, dictionary, **kwargs): settings.word_dict = dictionary settings.input_types = [ diff --git a/demo/quick_start/trainer_config.lstm.py b/demo/quick_start/trainer_config.lstm.py index ec8a2cb00abd1..b412a9cbd914d 100644 --- a/demo/quick_start/trainer_config.lstm.py +++ b/demo/quick_start/trainer_config.lstm.py @@ -42,20 +42,13 @@ gradient_clipping_threshold=25 ) -bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) data = data_layer(name="word", size=len(word_dict)) emb = embedding_layer(input=data, size=128) -fc = fc_layer(input=emb, size=512, - act=LinearActivation(), - bias_attr=bias_attr, - layer_attr=ExtraAttr(drop_rate=0.1)) -lstm = lstmemory(input=fc, act=TanhActivation(), - bias_attr=bias_attr, - layer_attr=ExtraAttr(drop_rate=0.25)) -lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_last, size=2, - bias_attr=bias_attr, +lstm = simple_lstm(input=emb, size=128, + lstm_cell_attr=ExtraAttr(drop_rate=0.25)) +lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling()) +output = fc_layer(input=lstm_max, size=2, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(output) diff --git a/doc_cn/concepts/nn.rst b/doc_cn/concepts/nn.rst new file mode 100644 index 0000000000000..f4d2cf490d147 --- /dev/null +++ b/doc_cn/concepts/nn.rst @@ -0,0 +1,3 @@ +TBD + +目前正在书写中。敬请期待。 \ No newline at end of file diff --git a/doc_cn/concepts/program_concepts.rst b/doc_cn/concepts/program_concepts.rst new file mode 100644 index 0000000000000..af5bbdac260af --- /dev/null +++ b/doc_cn/concepts/program_concepts.rst @@ -0,0 +1,4 @@ +TBD +### + +目前正在书写中。敬请期待。 \ No newline at end of file diff --git a/doc_cn/concepts/pserver_topology.dot b/doc_cn/concepts/pserver_topology.dot new file mode 100644 index 0000000000000..9ff658b849503 --- /dev/null +++ b/doc_cn/concepts/pserver_topology.dot @@ -0,0 +1,68 @@ +graph pp_topology { + rankdir=BT; + subgraph cluster_node0 { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "机器0" + + pserver0 [label="Parameter \n Server 0"] + trainer0 [label="Trainer 0"] + } + subgraph cluster_node1 { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "机器1" + + pserver1 [label="Parameter \n Server 1"] + trainer1 [label="Trainer 1"] + } + + subgraph cluster_node2 { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "机器2" + + pserver2 [label="Parameter \n Server 2"] + trainer2 [label="Trainer 2"] + } + + subgraph cluster_node3 { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "机器3" + + pserver3 [label="Parameter \n Server 3"] + trainer3 [label="Trainer 3"] + } + + data [label="数据", shape=hexagon] + + trainer0 -- pserver0 + trainer0 -- pserver1 + trainer0 -- pserver2 + trainer0 -- pserver3 + + trainer1 -- pserver0 + trainer1 -- pserver1 + trainer1 -- pserver2 + trainer1 -- pserver3 + + trainer2 -- 
pserver0 + trainer2 -- pserver1 + trainer2 -- pserver2 + trainer2 -- pserver3 + + trainer3 -- pserver0 + trainer3 -- pserver1 + trainer3 -- pserver2 + trainer3 -- pserver3 + + data -- trainer0 + data -- trainer1 + data -- trainer2 + data -- trainer3 +} diff --git a/doc_cn/concepts/trainer_config.py b/doc_cn/concepts/trainer_config.py new file mode 100644 index 0000000000000..8d8c79fb39e0c --- /dev/null +++ b/doc_cn/concepts/trainer_config.py @@ -0,0 +1,23 @@ +from paddle.trainer_config_helpers import * + +define_py_data_sources2(train_list='train.list', + test_list='test.list', + module='provider', + obj='process') +settings( + batch_size=128, + learning_rate=1e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(0.5) +) + +img = data_layer(name='pixel', size=28 * 28) + +hidden1 = simple_img_conv_pool(input=img, filter_size=3, num_filters=32, pool_size=3, + num_channel=1) + +hidden2 = fc_layer(input=hidden1, size=200, act=TanhActivation(), + layer_attr=ExtraAttr(drop_rate=0.5)) +predict = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) + +outputs(classification_cost(input=predict, label=data_layer(name='label', size=10))) diff --git a/doc_cn/concepts/use_concepts.rst b/doc_cn/concepts/use_concepts.rst new file mode 100644 index 0000000000000..67e98edabc0c2 --- /dev/null +++ b/doc_cn/concepts/use_concepts.rst @@ -0,0 +1,191 @@ +######################### +PaddlePaddle 基本使用概念 +######################### + +PaddlePaddle是一个神经网络学习框架。其单机进程为 :code:`paddle train`。 单机的所有设备使用,均在单机进程内调度完成。 而多机辅助进程 :code:`paddle pserver` 负责联合多个单机进程进行通信,进而充分利用集群的计算资源。 PaddlePaddle同时以 :code:`swig api` 的形式,提供训练结果模型预测的方法和自定义训练流程。 + +下面我们会分别介绍主要进程 :code:`paddle train` 中的一些概念。这些概念会对如何使用PaddlePaddle有一定的帮助。 了解这些概念的前提是,读者已经了解 `基本的神经网络/机器学习原理和概念 `_ 。同时,如果想要了解PaddlePaddle实现中的一些概念,请参考 `PaddlePaddle 编程中的基本概念 `_ 。 + +.. contents:: + +PaddlePaddle 的进程模型 +======================= + +PaddlePaddle进程内嵌了一个 :code:`python` 解释器。 这个 :code:`python` 解释器负责解析用户定义的神经网络配置,和解析用户数据,并将用户数据传入给 PaddlePaddle。 + +.. graphviz:: + + digraph pp_process { + rankdir=LR; + config_file [label="用户神经网络配置"]; + subgraph cluster_pp { + style=filled; + color=lightgrey; + node [style=filled, color=white, shape=box]; + label = "PaddlePaddle C++"; + py [label="Python解释器"]; + } + data_provider [label="用户数据解析"]; + config_file -> py; + py -> data_provider [dir="back"]; + } + +所以,PaddlePaddle单机训练进程,:code:`paddle train` , 对于用户的主要接口语言为 python。 主要需要用户配置的两个文件为 :code:`DataProvider` 和训练文件 :code:`TrainerConfig` 。 + + +DataProvider +============ + +DataProvider是 :code:`paddle train` 的数据提供器。 它负责将用户的原始数据转换成 PaddlePaddle 可以识别的数据类型。每当 PaddlePaddle 需要新的数据训练时,都会调用 DataProvider 返回数据。 当所有数据读取完一轮后,DataProvider 便返回空数据通知 PaddlePaddle。PaddlePaddle负责在下一轮训练开始前,将DataProvider重置。 + +需要注意的是,DataProvider在PaddlePaddle中是被训练逻辑调用的关系, 而不是新的数据驱动训练。并且所有的 :code:`shuffle` , 和一些随机化的噪声添加,都应该在 DataProvider 阶段完成。 + +为了方便用户使用自己的数据格式, PaddlePaddle 提供了 `PyDataProvider`_ 来处理数据。 并且在这个Provider中,PaddlePaddle的 C++ 部分接管了如何shuffle,处理 batch,GPU/CPU通信,双缓冲,异步读取等问题。 用户可以参考 `PyDataProvider`_ 的相关文档,继续深入了解 DataProvider 的使用。 + + +训练文件 +======== + +训练文件是PaddlePaddle中配置神经网络结构、学习优化算法、数据传入方式的地方。 训练文件是一个python文件,使用命令行参数 :code:`--config` 传给 paddle 的主程序。 例如\: + +.. code-block:: bash + + paddle train --config=trainer_config.py + +一个典型简单的训练文件可能为 + +.. 
literalinclude:: trainer_config.py + :linenos: + +下面我们详细的介绍一下训练文件中各个模块的概念。 + + +trainer_config_helpers +---------------------- + +PaddlePaddle的配置文件与PaddlePaddle C++端通信的最基础协议是 :code:`protobuf` 。而为了避免用户直接写比较难写的 protobuf string,我们书写了一个helpers来生成这个protobuf包。所以在文件的开始,import这些helpers函数。 + +需要注意的是,这个 :code:`paddle.trainer_config_helpers` 包是标准的python包,这意味着用户可以选择自己喜欢的 :code:`ide` 或者编辑器来编写Paddle的配置文件,这个python包注释文档比较完善,并且考虑了IDE的代码提示与类型注释。 + +data_sources +------------ + +data_sources是配置神经网络的数据源。这里使用的函数是 :code:`define_py_data_sources2` ,这个函数是定义了使用 `PyDataProvider`_ 作为数据源。 而后缀 :code:`2` 是Paddle历史遗留问题,因为Paddle之前使用的 PyDataProvider 性能较差,所以完全重构了一个新的 `PyDataProvider`_ 。 + +data_sources里面的 train_list 和 test_list 指定的是训练文件列表和测试文件列表。 如果传入一个字符串的话,是指一个训练列表文件。这个训练列表文件中包含的是每一个训练或者测试文件的路径。如果传入一个list的话,则会默认生成一个 list 文件,再传入给 train.list 或者 test.list 。 + +而 :code:`module` 和 :code:`obj` 指定了 DataProvider 的模块名和函数名。 + +更具体的使用,请参考 `PyDataProvider`_ 。 + +settings +-------- + +`settings`_ 是神经网络训练算法相关的设置项。包括学习率,batch_size,优化算法,正则方法等等。具体的使用方法请参考 `settings`_ 文档。 + +网络配置 +-------- + +上述网络配置中余下的部分均是神经网络配置。第一行是定义一个名字叫 "pixel" 的 :code:`data_layer` 。每一个layer返回的都是一个 :code:`LayerOutput` 对象。 这里第一层的输出对象是 :code:`img` 。然后这个对象传输给了另一个 layer 函数, +:code:`simple_img_conv_pool` 。:code:`simple_img_conv_pool` 是一个组合层, +包括了图像的卷积 (convolution) 和池化(pooling), +并继续接了一个全连接层( :code:`fc_layer` ),然后再接了一个Softmax的全连接层。 + +最终,网络配置输出了 :code:`classification_cost` 。标记网络输出的函数为 +:code:`outputs` 。网络的输出是神经网络的优化目标,神经网络训练的时候,实际上就是 +要最小化这个输出。 + +在神经网络进行预测的时候,实际上网络的输出也是通过 :code:`outputs` 标记。 + + +Layer、Projection、Operator +=========================== + +PaddlePaddle的网络基本上是基于Layer来配置的。所谓的Layer即是神经网络的某一层, +而神经网络的某一层,一般是封装了许多复杂操作的操作集合。比如最简单的 +:code:`fc_layer` ,也包括矩阵乘法,多输入的求和,和activation。 + +.. code-block:: python + + data = data_layer(name='data', size=200) + out = fc_layer(input=data, size=200, act=TanhActivation()) + +而对于更灵活配置需求,可能这样基于Layer的配置是不灵活的。于是 PaddlePaddle 提供 +了基于 Projection 或者 Operator 的配置。使用Projection和Operator需要与 +:code:`mixed_layer` 配合使用。 :code:`mixed_layer` 是将layer中的元素累加求和, +并且做一个 :code:`activation` , 而这个layer具体如何计算,是交由内部的Projection +和 Operator 定义。Projection是指含有可学习参数的操作,而Operator不含有可学习的 +参数,输入全是其他Layer的输出。 + + +例如,和 :code:`fc_layer` 同样功能的 :code:`mixed_layer` 。 + +.. code-block:: python + + data = data_layer(name='data', size=200) + with mixed_layer(size=200) as out: + out += full_matrix_projection(input=data) + +PaddlePaddle可以使用的mixed layer 配置出非常复杂的网络,甚至可以直接配置一个完整的LSTM。 +用户可以参考 `mixed_layer`_ 的相关文档进行配置。 + +如何利用单机的所有GPU或所有CPU核心 +================================== + +PaddlePaddle的单机进程 :code:`paddle train` 可以充分利用一台计算机上所有的GPU资 +源或者CPU。 + +如果要使用机器上多块GPU,使用如下命令即可\: + +.. code-block:: bash + + paddle train --use_gpu=true --trainer_count=4 # use 4 gpu card, 0, 1, 2, 3 + +如果要使用机器上多块CPU, 使用如下命令即可\: + +.. code-block:: bash + + paddle train --trainer_config=4 # use 4 cpu cores. + +对于其他设置GPU的选择情况,例如选择第0、2号GPU显卡,则可以使用 :code:`CUDA_VISIBLE_DEVICES` 环境变量来选择部分的显卡。 具体可以参考连接`masking-gpus`_ 。 可以使用的命令为 + +.. code-block:: bash + + env CUDA_VISIBLE_DEVICES=0,2 paddle train --use_gpu=true --trainer_config=2 + +如何利用多台机器的计算资源训练神经网络 +====================================== + +PaddlePaddle多机使用的经典方法是通过 :code:`Parameter Server` 来对多机的 :code:`paddle train` 进行同步。 而多机训练神经网络,首先要讲数据切分到不同的机器上。 切分数据文件的方式在PaddlePaddle的开源实现中并没有提供工具包。 但是切分数据并不是一件非常复杂的事情,也不是神经网络实现的重点。 + +多机训练过程中,经典的拓扑结构如下\: + +.. graphviz:: pserver_topology.dot + +图中每个灰色方块是一台机器,在每个机器中,先去启动一个 :code:`paddle pserver` 进程,并确定整体的端口号。可能的参数是\: + +.. 
code-block:: bash + + paddle pserver --port=5000 --num_gradient_servers=4 --nics='eth0' + +这里说明系统的 :code:`paddle pserver` 的起始端口是 :code:`5000` ,并且有四个训练进程(:code:`gradient_servers`,Paddle同时将 :code:`paddle train` 进程称作 :code:`GradientServer` 。因为其为负责提供Gradient的进程)。 而对于训练进程的话,则需要在 :code:`paddle pserver` 启动之后,再在各个节点上运行如下命令\: + +.. code-block:: bash + + paddle train --port=5000 --pservers=192.168.100.101,192.168.100.102,192.168.100.103,192.168.100.104 --config=... + +对于简单的多机协同使用上述方式即可。同时,pserver/train 通常在高级情况下,还有两个参数需要设置,他们是 + +* --ports_num\: 一个 pserver进程共绑定多少个端口用来做稠密更新。默认是1 +* --ports_num_for_sparse\: 一个pserver进程共绑定多少端口用来做稀疏更新,默认是0 + +使用手工指定端口数量,是因为Paddle的网络通信中,使用了 :code:`int32` 作为消息长度,比较容易在大模型下溢出。所以,在 :code:`paddle pserver` 进程中可以启动多个子线程去接受 trainer 的数据,这样单个子线程的长度就不会溢出了。但是这个值不可以调的过大,因为增加这个值,还是对性能,尤其是内存占用有一定的开销的,另外稀疏更新的端口如果太大的话,很容易某一个参数服务器没有分配到任何参数。 + +详细的说明可以参考,使用 `集群训练Paddle`_ 。 + + +.. _PyDataProvider: ../ui/data_provider/pydataprovider2.html +.. _settings: ../../doc/ui/api/trainer_config_helpers/optimizers.html#settings +.. _mixed_layer: ../../doc/ui/api/trainer_config_helpers/layers.html#mixed-layer +.. _masking-gpu: http://www.acceleware.com/blog/cudavisibledevices-masking-gpus +.. _集群训练Paddle: ../cluster/index.html diff --git a/doc_cn/faq/index.rst b/doc_cn/faq/index.rst index 283607957ce63..db28b4436fe5e 100644 --- a/doc_cn/faq/index.rst +++ b/doc_cn/faq/index.rst @@ -166,4 +166,14 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 这里 :code:`hidden_a` 和 :code:`hidden_b` 使用了同样的parameter和bias。并且softmax层的两个输入也使用了同样的参数 :code:`softmax_param`。 +7. *-cp27mu-linux_x86_64.whl is not a supported wheel on this platform. +----------------------------------------------------------------------- + +出现这个问题的主要原因是,系统编译wheel包的时候,使用的 :code:`wheel` 包是最新的, +而系统中的 :code:`pip` 包比较老。具体的解决方法是,更新 :code:`pip` 包并重新编译PaddlePaddle。 +更新 :code:`pip` 包的方法是\: + +.. 
code-block:: bash + + pip install --upgrade pip diff --git a/doc_cn/index.rst b/doc_cn/index.rst index 715da44fb41d4..e74b0942a6cc0 100644 --- a/doc_cn/index.rst +++ b/doc_cn/index.rst @@ -5,6 +5,7 @@ PaddlePaddle文档 -------- * `介绍 `_ * `快速入门 `_ +* `基本使用概念 `_ * `编译与安装 `_ * `用户接口 `_ * `使用示例 `_ From fcf177fc4bcba5f83ed6c53ede9ae5caed5a0bc0 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 2 Nov 2016 07:53:43 +0000 Subject: [PATCH 062/180] reuse code of PoolProjection in PoolProjectionLayer --- paddle/gserver/layers/PoolLayer.cpp | 6 +- paddle/gserver/layers/PoolProjection.cpp | 4 +- paddle/gserver/layers/PoolProjectionLayer.cpp | 64 ++----------------- paddle/gserver/layers/PoolProjectionLayer.h | 35 +++------- paddle/gserver/tests/test_LayerGrad.cpp | 10 ++- 5 files changed, 25 insertions(+), 94 deletions(-) diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/gserver/layers/PoolLayer.cpp index 7fc27ac0bd8e0..2fbc9001f1161 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/gserver/layers/PoolLayer.cpp @@ -52,10 +52,8 @@ bool PoolLayer::init(const LayerMap& layerMap, Layer* PoolLayer::create(const LayerConfig& config) { CHECK_EQ(config.inputs_size(), 1); const std::string& pool = config.inputs(0).pool_conf().pool_type(); - if (pool == "max-projection") { - return new MaxPoolProjectionLayer(config); - } else if (pool == "avg-projection") { - return new AvgPoolProjectionLayer(config); + if (pool == "max-projection" || pool == "avg-projection") { + return new PoolProjectionLayer(config); #ifndef PADDLE_ONLY_CPU } else if (CudnnPoolLayer::typeCheck(pool)) { return new CudnnPoolLayer(config); diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index 50059ee04d39b..468ca6f1b7d2d 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -21,9 +21,9 @@ REGISTER_PROJECTION_CREATE_FUNC(pool2, &PoolProjection::create); PoolProjection* PoolProjection::create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) { const std::string& pool = config.pool_conf().pool_type(); - if (pool == "max") { + if (pool == "max-projection") { return new MaxPoolProjection(config, parameter, useGpu); - } else if (pool == "avg") { + } else if (pool == "avg-projection") { return new AvgPoolProjection(config, parameter, useGpu); } else { LOG(FATAL) << "Unknown pool type: " << pool; diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp index 5a2e9afb6e164..3a54c51cfc1ac 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/gserver/layers/PoolProjectionLayer.cpp @@ -19,6 +19,7 @@ limitations under the License. 
*/ namespace paddle { + size_t PoolProjectionLayer::getSize() { CHECK_EQ(inputLayers_.size(), 1UL); size_t layerSize = 0; @@ -41,71 +42,20 @@ size_t PoolProjectionLayer::getSize() { return layerSize; } -void MaxPoolProjectionLayer::forward(PassType passType) { - Layer::forward(passType); - - /* malloc memory for the output_ if necessary */ - /* note: one sample correspond to one ROW */ - MatrixPtr input = getInputValue(0); - int batchSize = input->getHeight(); - int size = getSize(); - resetOutput(batchSize, size); - - MatrixPtr outV = getOutputValue(); - - outV->maxPoolForward(*input, imgSizeH_, imgSizeW_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, confPaddingY_, confPadding_); -} - -void MaxPoolProjectionLayer::backward(const UpdateCallback& callback) { - (void)callback; - - if (NULL == getInputGrad(0)) { - return; - } - - /* Do derivation */ - MatrixPtr outGrad = getOutputGrad(); - MatrixPtr inputV = getInputValue(0); - MatrixPtr outV = getOutputValue(); - MatrixPtr inputGrad = getInputGrad(0); - - inputGrad->maxPoolBackward(*inputV, imgSizeH_, imgSizeW_, *outGrad, *outV, - sizeX_, sizeY_, - strideY_, stride_, outputH_, outputW_, 1, 1, - confPaddingY_, confPadding_); -} - -void AvgPoolProjectionLayer::forward(PassType passType) { +void PoolProjectionLayer::forward(PassType passType) { Layer::forward(passType); - - /* malloc memory for the output_ if necessary */ - /* note: one sample correspond to one ROW */ - MatrixPtr input = getInputValue(0); - int batchSize = input->getHeight(); + const Argument& in = getInput(0); + int batchSize = in.value->getHeight(); int size = getSize(); resetOutput(batchSize, size); - - MatrixPtr outV = getOutputValue(); - - outV->avgPoolForward(*input, imgSizeH_, imgSizeW_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, confPaddingY_, confPadding_); + poolProjection_->forward(&in, &output_, passType); } -void AvgPoolProjectionLayer::backward(const UpdateCallback& callback) { +void PoolProjectionLayer::backward(const UpdateCallback& callback) { (void)callback; - if (NULL == getInputGrad(0)) { return; } - /* Do derivation */ - MatrixPtr outputGrad = getOutputGrad(); - MatrixPtr inputGrad = getInputGrad(0); - inputGrad->avgPoolBackward(*outputGrad, imgSizeH_, imgSizeW_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, 1, 1, - confPaddingY_, confPadding_); + poolProjection_->backward(callback); } } // namespace paddle diff --git a/paddle/gserver/layers/PoolProjectionLayer.h b/paddle/gserver/layers/PoolProjectionLayer.h index 42bbc83c62246..6e336f79e9043 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.h +++ b/paddle/gserver/layers/PoolProjectionLayer.h @@ -16,6 +16,7 @@ limitations under the License. 
*/ #pragma once #include "PoolLayer.h" +#include "PoolProjection.h" #include "paddle/math/Matrix.h" #include @@ -27,35 +28,19 @@ class PoolProjectionLayer : public PoolLayer { protected: size_t imgSizeH_, imgSizeW_; size_t outputH_, outputW_; + std::unique_ptr poolProjection_; + ProjectionConfig projectionConfig_; public: size_t getSize(); - explicit PoolProjectionLayer(const LayerConfig& config) : PoolLayer(config) {} -}; -/** - * @brief A layer for max pooling - */ -class MaxPoolProjectionLayer : public PoolProjectionLayer { -public: - explicit MaxPoolProjectionLayer(const LayerConfig& config) - : PoolProjectionLayer(config) {} - - ~MaxPoolProjectionLayer() {} - - virtual void forward(PassType passType); - virtual void backward(const UpdateCallback& callback = nullptr); -}; -/** - * @brief A layer for average pooling - */ -class AvgPoolProjectionLayer : public PoolProjectionLayer { -public: - explicit AvgPoolProjectionLayer(const LayerConfig& config) - : PoolProjectionLayer(config) {} - - ~AvgPoolProjectionLayer() {} - virtual void forward(PassType passType); virtual void backward(const UpdateCallback& callback = nullptr); + explicit PoolProjectionLayer(const LayerConfig& config) + : PoolLayer(config) { + PoolConfig* conf = projectionConfig_.mutable_pool_conf(); + *conf = config_.inputs(0).pool_conf(); + poolProjection_.reset(PoolProjection::create(projectionConfig_, nullptr, + useGpu_)); + } }; } // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 3d633f4b72797..fc11a9d39aa85 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -897,12 +897,10 @@ void testSppLayer(const string& poolType, const int pyramidHeight, bool trans, TEST(Layer, SpatialPyramidPoolLayer) { for (auto useGpu : {false, true}) { - testSppLayer("avg", 1, false, useGpu); - testSppLayer("avg", 3, false, useGpu); - testSppLayer("avg", 5, false, useGpu); - testSppLayer("max", 1, false, useGpu); - testSppLayer("max", 3, false, useGpu); - testSppLayer("avg", 5, false, useGpu); + for (auto pyramidHeight : {1, 2, 3}) { + testSppLayer("avg-projection", pyramidHeight, false, useGpu); + testSppLayer("max-projection", pyramidHeight, false, useGpu); + } } } From ee028bb5eaf9368bc411a0d9f6d40197866b7aec Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 2 Nov 2016 04:14:35 -0500 Subject: [PATCH 063/180] Add How to build docs (#312) --- doc_cn/howto/how_to_write_docs/index.rst | 63 +++++++++++++++++++ doc_cn/index.rst | 1 + paddle/scripts/tools/build_docs/.gitignore | 2 + paddle/scripts/tools/build_docs/Dockerfile | 6 ++ paddle/scripts/tools/build_docs/build.sh | 13 ++++ paddle/scripts/tools/build_docs/build_docs.sh | 4 ++ 6 files changed, 89 insertions(+) create mode 100644 doc_cn/howto/how_to_write_docs/index.rst create mode 100644 paddle/scripts/tools/build_docs/.gitignore create mode 100644 paddle/scripts/tools/build_docs/Dockerfile create mode 100755 paddle/scripts/tools/build_docs/build.sh create mode 100755 paddle/scripts/tools/build_docs/build_docs.sh diff --git a/doc_cn/howto/how_to_write_docs/index.rst b/doc_cn/howto/how_to_write_docs/index.rst new file mode 100644 index 0000000000000..869ef747f9f88 --- /dev/null +++ b/doc_cn/howto/how_to_write_docs/index.rst @@ -0,0 +1,63 @@ +############################### +如何贡献/修改PaddlePaddle的文档 +############################### + +PaddlePaddle的文档使用 `cmake`_ 驱动 `sphinx`_ 生成。公有两个文档,:code:`doc` 和 :code:`doc_cn` 。这两者会在 `cmake`_ 中进行编译,生成后的文档会存储在服务器的 :code:`doc` 和 :code:`doc_cn` 
两个目录下。 + +下面分几个部分介绍一下PaddlePaddle文档的贡献方法。 + +如何书写PaddlePaddle的文档 +========================== + +TBD + +如何构建PaddlePaddle的文档 +========================== + +构建PaddlePaddle文档,需要使用构建Paddle的全部环境。准备这个环境相对来说比较复杂,所以本文档提供两种方式构建PaddlePaddle的文档,即 + +* 使用Docker构建PaddlePaddle的文档 +* 直接构建PaddlePaddle的文档。 + +并且,我们推荐使用Docker来构建PaddlePaddle的文档。 + + +使用Docker构建PaddlePaddle的文档 +-------------------------------- + +使用Docker构建PaddlePaddle的文档,首先要求在系统里安装好Docker工具包。安装Docker请参考 `Docker的官网 `_ 。 + +安装好Docker之后可以使用源码目录下的脚本构建文档,即 + +.. code-block:: bash + + cd TO_YOUR_PADDLE_CLONE_PATH + cd paddle/scripts/tools/build_docs + bash build_docs.sh + +执行完这个脚本后,该目录下会生成两个目录,分别是\: + +* doc 目录,英文文档地址 +* doc_cn 目录,中文文档地址 + +打开浏览器访问对应目录下的index.html即可访问本地文档。 + +.. code-block:: bash + + open doc_cn/index.html + + +直接构建PaddlePaddle的文档 +-------------------------- + +TBD + + +如何更新www.paddlepaddle.org文档 +================================ + +TBD + + +.. _cmake: https://cmake.org/ +.. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ \ No newline at end of file diff --git a/doc_cn/index.rst b/doc_cn/index.rst index e74b0942a6cc0..f1398206fddff 100644 --- a/doc_cn/index.rst +++ b/doc_cn/index.rst @@ -15,6 +15,7 @@ PaddlePaddle文档 开发指南 -------- * `新写Layer <../doc/dev/new_layer/index.html>`_ +* `如何贡献文档 `_ 算法教程 -------- diff --git a/paddle/scripts/tools/build_docs/.gitignore b/paddle/scripts/tools/build_docs/.gitignore new file mode 100644 index 0000000000000..6ec14c8f5bc37 --- /dev/null +++ b/paddle/scripts/tools/build_docs/.gitignore @@ -0,0 +1,2 @@ +doc +doc_cn diff --git a/paddle/scripts/tools/build_docs/Dockerfile b/paddle/scripts/tools/build_docs/Dockerfile new file mode 100644 index 0000000000000..5db0b29c47399 --- /dev/null +++ b/paddle/scripts/tools/build_docs/Dockerfile @@ -0,0 +1,6 @@ +FROM paddledev/paddle:cpu-devel-latest +COPY build.sh / +RUN pip install sphinx &&\ + apt install -y doxygen graphviz &&\ + pip install breathe recommonmark numpy protobuf==2.6.1 +CMD /build.sh diff --git a/paddle/scripts/tools/build_docs/build.sh b/paddle/scripts/tools/build_docs/build.sh new file mode 100755 index 0000000000000..a23b6e61d4592 --- /dev/null +++ b/paddle/scripts/tools/build_docs/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex + +mkdir -p /build +cd /build +cmake /paddle -DWITH_DOC=ON +make paddle_docs paddle_docs_cn -j `nproc` +mkdir -p /output/doc +mkdir -p /output/doc_cn +cp -r doc/html/* /output/doc/ +cp -r doc_cn/html/* /output/doc_cn/ +cd / +rm -rf /paddle/build diff --git a/paddle/scripts/tools/build_docs/build_docs.sh b/paddle/scripts/tools/build_docs/build_docs.sh new file mode 100755 index 0000000000000..9f8b80435c8fb --- /dev/null +++ b/paddle/scripts/tools/build_docs/build_docs.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +docker build . -t paddle_build_doc +docker run --rm -v $PWD/../../../../:/paddle -v $PWD:/output paddle_build_doc From 5acf136615c9f1b6e73cf9a9bdbaac770e2e9669 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 2 Nov 2016 19:07:04 +0800 Subject: [PATCH 064/180] Bug fix in CudnnConvLayer, which will lead to destruction error. 
(#317) --- paddle/gserver/layers/CudnnConvLayer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/gserver/layers/CudnnConvLayer.cpp b/paddle/gserver/layers/CudnnConvLayer.cpp index e77216f17c3be..23ba2341185d1 100644 --- a/paddle/gserver/layers/CudnnConvLayer.cpp +++ b/paddle/gserver/layers/CudnnConvLayer.cpp @@ -35,8 +35,8 @@ bool CudnnConvLayer::init(const LayerMap &layerMap, ProjectionConfig* conf = new ProjectionConfig(); conf->set_type("conv"); conf->set_num_filters(numFilters_); - conf->set_allocated_conv_conf( - config_.mutable_inputs(i)->mutable_conv_conf()); + ConvConfig* convConf = conf->mutable_conv_conf(); + *convConf = *(config_.mutable_inputs(i)->mutable_conv_conf()); conf->set_input_size(getPrev(i)->getSize()); conf->set_output_size(getSize()); projConf_.emplace_back(conf); From 968464cc605ed67c658af6c65d617b7036bfecfb Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 2 Nov 2016 12:47:13 -0500 Subject: [PATCH 065/180] Fix a bug in testOnePeriod. (#322) * Forget to finishTestPeriod in testOnePeriod. * Fix #318 --- paddle/trainer/Tester.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/trainer/Tester.cpp b/paddle/trainer/Tester.cpp index b1bb75654a9d5..d3b88019faa04 100644 --- a/paddle/trainer/Tester.cpp +++ b/paddle/trainer/Tester.cpp @@ -116,6 +116,7 @@ void Tester::testOnePeriod() { } testOneDataBatch(dataBatch, &outArgs); } + finishTestPeriod(); } void Tester::finishTestPeriod() { From d412a5ea21bff386f47ef79ef182d3e86466c175 Mon Sep 17 00:00:00 2001 From: Haichao-Zhang Date: Wed, 2 Nov 2016 13:05:59 -0700 Subject: [PATCH 066/180] add user_arg to LayerConfig (#315) --- proto/ModelConfig.proto.m4 | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 79e76b6bf1bdd..3fb96dd5fe00c 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -382,6 +382,15 @@ sinclude(`ModelConfigLayer.proto.m4') // bias size optional uint32 bias_size = 48 [default = 0]; + + // this parameter can be used as a user-defined parameter when necessary, + // without changing the proto file. + // e.g., when a new layer with a user-defined parameter is implemented, + // it can be used to pass that parameter, without modifying the proto file. + // string type is used for flexibility: different types can be converted + // to string and reinterpreted in the user's own layer implementation. + optional string user_arg = 49; + } message EvaluatorConfig { From 9f9b4afcdb5f0dfb0dd203a0d02af04163a23ab7 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Wed, 2 Nov 2016 20:20:40 -0700 Subject: [PATCH 067/180] install the right python package version (#326) For multiple installation of paddle, there might be multiple versions of python package at opt/paddle/share/wheels/. We should install the right version. Ideally, we should remove the wrong versions when install. But it's not easy to do this with cmake. Change-Id: Ida8a8d60643ad9e42cf1c85776de9122d5ba1392 --- paddle/scripts/submit_local.sh.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/scripts/submit_local.sh.in b/paddle/scripts/submit_local.sh.in index 4cf5f41f195df..213cf2f1cc7e4 100644 --- a/paddle/scripts/submit_local.sh.in +++ b/paddle/scripts/submit_local.sh.in @@ -68,7 +68,7 @@ EOF if [ $? -eq 1 ]; then # Older version installed, or not installed at all echo "First time run paddle, need to install some python dependencies." 
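The user_arg field added to LayerConfig in patch 066/180 above is documented only by a proto comment, so a small sketch of how a user-defined layer might consume it could help. The snippet below is illustrative only and is not taken from any patch in this series: MyCustomLayer, the threshold_ member, and the float interpretation of the string are assumptions made purely for the example; only the Layer::init signature and config_.user_arg() (the accessor protobuf generates for the new optional string field) come from the surrounding code.

.. code-block:: cpp

    // A minimal sketch, assuming a hypothetical user-defined layer; not part of
    // the patches above. It shows the free-form user_arg string being
    // reinterpreted as a typed parameter inside the layer's own init().
    #include <string>
    #include "paddle/gserver/layers/Layer.h"

    namespace paddle {

    class MyCustomLayer : public Layer {  // hypothetical layer, for illustration only
    public:
      explicit MyCustomLayer(const LayerConfig& config) : Layer(config) {}

      bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override {
        if (!Layer::init(layerMap, parameterMap)) return false;
        // user_arg arrives as a plain string; convert it to whatever type the
        // layer needs. Here it is parsed as a floating point threshold.
        if (!config_.user_arg().empty()) {
          threshold_ = std::stof(config_.user_arg());
        }
        return true;
      }

      void forward(PassType passType) override { Layer::forward(passType); }
      void backward(const UpdateCallback& callback = nullptr) override {}

    private:
      real threshold_ = 0.0;  // assumed parameter carried through user_arg
    };

    }  // namespace paddle

Keeping the field as a free-form string means an experimental layer can receive parameters without touching the proto file, at the cost of doing its own parsing and validation, which is exactly the flexibility the proto comment describes.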
BASEDIR=$(dirname "$0") - pip install ${BASEDIR}/../opt/paddle/share/wheels/*.whl + pip install ${BASEDIR}/../opt/paddle/share/wheels/*-@PADDLE_VERSION@-*.whl if [ $? -ne 0 ]; then echo "pip install wheels failed. " echo "Please use 'sudo paddle' at the first time you use PaddlePaddle" From 5f2059db05bbc0590f4971198e034a56d1aa5915 Mon Sep 17 00:00:00 2001 From: lzhao4ever Date: Thu, 3 Nov 2016 09:24:16 -0700 Subject: [PATCH 068/180] Add matrix inverse (#240) * Add matrix inverse --- cmake/cblas.cmake | 16 ++++- paddle/cuda/include/hl_cuda_cublas.h | 24 +++++-- .../cuda/include/stub/hl_cuda_cublas_stub.h | 6 ++ paddle/cuda/src/hl_cuda_cublas.cc | 55 ++++++++++++++++ paddle/math/MathFunctions.cpp | 40 ++++++++++++ paddle/math/MathFunctions.h | 15 +++++ paddle/math/Matrix.cpp | 65 +++++++++++++++++++ paddle/math/Matrix.h | 20 ++++++ paddle/math/tests/test_matrixCompare.cpp | 29 ++++++++- 9 files changed, 261 insertions(+), 9 deletions(-) diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index 57c32a54cd727..685334c658506 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -1,4 +1,4 @@ -# Find the CBlas libraries +# Find the CBlas and lapack libraries # # It will search MKL, atlas, OpenBlas, reference-cblas in order. # @@ -19,6 +19,8 @@ set(MKL_ROOT $ENV{MKL_ROOT} CACHE PATH "Folder contains MKL") find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT}/include) +find_path(MKL_INCLUDE_DIR mkl_lapacke.h PATHS + ${MKL_ROOT}/include) find_library(MKL_CORE_LIB NAMES mkl_core PATHS ${MKL_ROOT}/lib ${MKL_ROOT}/lib/intel64) @@ -37,6 +39,7 @@ if(MKL_INCLUDE_DIR AND MKL_CORE_LIB AND MKL_SEQUENTIAL_LIB AND MKL_INTEL_LP64) ${MKL_SEQUENTIAL_LIB} ${MKL_CORE_LIB}) add_definitions(-DPADDLE_USE_MKL) + message(STATUS "Found MKL (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") return() # return file. endif() @@ -55,15 +58,19 @@ set(ATLAS_LIB_SEARCH_PATHS ) find_path(ATLAS_INC_DIR NAMES cblas.h PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) +find_path(ATLAS_CLAPACK_INC_DIR NAMES clapack.h + PATHS ${ATLAS_INCLUDE_SEARCH_PATHS}) find_library(ATLAS_CBLAS_LIB NAMES cblas libcblas.so.3 PATHS ${ATLAS_LIB_SEARCH_PATHS}) -find_library(ATLAS_LIB NAMES atlas libatlas.so.3 +find_library(ATLAS_LIB NAMES lapack_atlas liblapack_atlas.so.3 PATHS ${ATLAS_LIB_SEARCH_PATHS}) if(ATLAS_INC_DIR AND ATLAS_CBLAS_LIB AND ATLAS_LIB) set(CBLAS_PROVIDER ATLAS) - set(CBLAS_INC_DIR ${ATLAS_INC_DIR}) + set(CBLAS_INC_DIR ${ATLAS_INC_DIR} ${ATLAS_CLAPACK_INC_DIR}) set(CBLAS_LIBS ${ATLAS_LIB} ${ATLAS_CBLAS_LIB}) + add_definitions(-DPADDLE_USE_ATLAS) + message(STATUS "Found Atlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") return() endif() @@ -83,6 +90,8 @@ set(OPENBLAS_LIB_SEARCH_PATHS find_path(OPENBLAS_INC_DIR NAMES cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS}) +find_path(OPENBLAS_LAPACKE_INC_DIR NAMES lapacke.h + PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS}) find_library(OPENBLAS_LIB NAMES openblas PATHS ${OPENBLAS_LIB_SEARCH_PATHS}) @@ -90,6 +99,7 @@ if(OPENBLAS_INC_DIR AND OPENBLAS_LIB) set(CBLAS_PROVIDER OPENBLAS) set(CBLAS_INC_DIR ${OPENBLAS_INC_DIR}) set(CBLAS_LIBS ${OPENBLAS_LIB}) + message(STATUS "Found OpenBlas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBS})") return() endif() diff --git a/paddle/cuda/include/hl_cuda_cublas.h b/paddle/cuda/include/hl_cuda_cublas.h index 0ffbed18b5f9e..d757317eb4a97 100644 --- a/paddle/cuda/include/hl_cuda_cublas.h +++ b/paddle/cuda/include/hl_cuda_cublas.h @@ -21,8 +21,8 @@ limitations under the License. */ /** * @brief Matrix transpose: C_d = T(A_d) * - * @param[in] A_d input matrix (M x N). 
- * @param[out] C_d output matrix (N x M). + * @param[in] A_d input matrix (dimM x dimN). + * @param[out] C_d output matrix (dimN x dimM). * @param[in] dimM matrix height. * @param[in] dimN matrix width. * @param[in] lda the first dimension of A_d. @@ -39,8 +39,8 @@ extern void hl_matrix_transpose(real *A_d, /* * @brief Matrix transpose, while lda = dimN, ldc = dimM. * - * @param[in] A_d input matrix (M x N). - * @param[out] C_d output matrix (N x M). + * @param[in] A_d input matrix (dimM x dimN). + * @param[out] C_d output matrix (dimN x dimM). * @param[in] dimM matrix height. * @param[in] dimN matrix width. * @@ -50,6 +50,22 @@ extern void hl_matrix_transpose(real *A_d, int dimM, int dimN); +/* + * @brief Matrix inverse + * + * @param[in] A_d input matrix (dimN x dimN). + * @param[out] C_d output matrix (dimN x dimN). + * @param[in] dimN matrix height = matrix width + * @param[in] lda the first dimension of A_d + * @param[in] ldc the first dimension of C_d + * + */ +extern void hl_matrix_inverse(real *A_d, + real *C_d, + int dimN, + int lda, + int ldc); + /** * @brief C_d = alpha*(op(A_d) * op(B_d)) + beta*C_d * diff --git a/paddle/cuda/include/stub/hl_cuda_cublas_stub.h b/paddle/cuda/include/stub/hl_cuda_cublas_stub.h index 4a5e2a25a71b3..903dcbe8355d6 100644 --- a/paddle/cuda/include/stub/hl_cuda_cublas_stub.h +++ b/paddle/cuda/include/stub/hl_cuda_cublas_stub.h @@ -30,6 +30,12 @@ inline void hl_matrix_transpose(real *A_d, int dimM, int dimN) {} +inline void hl_matrix_inverse(real *A_d, + real *C_d, + int dimN, + int lda, + int ldc) {} + inline void hl_matrix_mul(real *A_d, hl_trans_op_t transa, real *B_d, hl_trans_op_t transb, real *C_d, diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/cuda/src/hl_cuda_cublas.cc index b3c9001ba3973..724ea490e8ea9 100644 --- a/paddle/cuda/src/hl_cuda_cublas.cc +++ b/paddle/cuda/src/hl_cuda_cublas.cc @@ -15,6 +15,7 @@ limitations under the License. 
*/ #include #include +#include "hl_cuda.h" #include "hl_cuda_cublas.h" #include "hl_thread.ph" #include "hl_dso_loader.h" @@ -75,6 +76,8 @@ DYNAMIC_LOAD_CUBLAS_WRAP(cublasSgemmBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasDgemmBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasCgemmBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasZgemmBatched) +DYNAMIC_LOAD_CUBLAS_WRAP(cublasSgetrfBatched) +DYNAMIC_LOAD_CUBLAS_WRAP(cublasSgetriBatched) CUBLAS_BLAS_ROUTINE_EACH(DYNAMIC_LOAD_CUBLAS_V2_WRAP) #undef DYNAMIC_LOAD_CUBLAS_WRAP @@ -88,10 +91,14 @@ CUBLAS_BLAS_ROUTINE_EACH(DYNAMIC_LOAD_CUBLAS_V2_WRAP) #define CUBLAS_GEAM dynload::cublasSgeam #define CUBLAS_GEMV dynload::cublasSgemv #define CUBLAS_GEMM dynload::cublasSgemm +#define CUBLAS_GETRF dynload::cublasSgetrfBatched +#define CUBLAS_GETRI dynload::cublasSgetriBatched #else #define CUBLAS_GEAM dynload::cublasDgeam #define CUBLAS_GEMV dynload::cublasDgemv #define CUBLAS_GEMM dynload::cublasDgemm +#define CUBLAS_GETRF dynload::cublasDgetrfBatched +#define CUBLAS_GETRI dynload::cublasDgetriBatched #endif const char* hl_cublas_get_error_string(cublasStatus_t status) { @@ -162,6 +169,54 @@ void hl_matrix_transpose(real *A_d, real *C_d, int dimM, int dimN) { hl_matrix_transpose(A_d, C_d, dimM, dimN, dimN, dimM); } +void hl_matrix_inverse(real *A_d, real *C_d, int dimN, int lda, int ldc) { + /* Solve Ax = I */ + CHECK_NOTNULL(A_d); + CHECK_NOTNULL(C_d); + + /* Step 1: Compute the LU decomposition of matrix A */ + real **inout_h = &A_d; + real **inout_d = (real **)hl_malloc_device(sizeof(real *)); + hl_memcpy(inout_d, inout_h, sizeof(real *)); + + int *pivot_d = (int *)hl_malloc_device(dimN*sizeof(int)); + int *info_d = (int *)t_resource.gpu_mem; + + /* Note: cublasSgetrfBatched is used to calculate a number of + small-sized matrices. There may be a better way to reconstruct + the API for better performance. 
+ */ + CHECK_CUBLAS(CUBLAS_GETRF(t_resource.handle, + dimN, inout_d, lda, pivot_d, + info_d, 1)); + + int info_h; + hl_memcpy(&info_h, info_d, sizeof(int)); + if (info_h != 0) { + LOG(FATAL) << "Factorization of matrix failed: matrix may be singular.\n"; + } + + /* Step 2: Compute the inverse of the matrix given its LU decomposition */ + real **out_h = &C_d; + real **out_d = (real **)hl_malloc_device(sizeof(real *)); + hl_memcpy(out_d, out_h, sizeof(real *)); + + CHECK_CUBLAS(CUBLAS_GETRI(t_resource.handle, + dimN, (const real **)inout_d, lda, pivot_d, + out_d, ldc, info_d, 1)); + + hl_memcpy(&info_h, info_d, sizeof(int)); + if (info_h != 0) { + LOG(FATAL) << "Inversion of matrix failed: matrix may be singular.\n"; + } + + hl_free_mem_device(inout_d); + hl_free_mem_device(pivot_d); + hl_free_mem_device(out_d); + + CHECK_SYNC("hl_matrix_inverse failed"); +} + void hl_matrix_mul(real *A_d, hl_trans_op_t transa, real *B_d, hl_trans_op_t transb, real *C_d, diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index da493379e3a37..f8132066477db 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -39,6 +39,46 @@ void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, beta, C, ldc); } +template<> +int getrf(const CBLAS_ORDER order, const int M, const int N, + float *A, const int lda, int *ipiv) { +#ifdef PADDLE_USE_ATLAS + return clapack_sgetrf(order, M, N, A, lda, ipiv); +#else + return LAPACKE_sgetrf(order, M, N, A, lda, ipiv); +#endif +} + +template<> +int getrf(const CBLAS_ORDER order, const int M, const int N, + double *A, const int lda, int *ipiv) { +#ifdef PADDLE_USE_ATLAS + return clapack_dgetrf(order, M, N, A, lda, ipiv); +#else + return LAPACKE_dgetrf(order, M, N, A, lda, ipiv); +#endif +} + +template<> +int getri(const CBLAS_ORDER order, const int N, float *A, + const int lda, const int *ipiv) { +#ifdef PADDLE_USE_ATLAS + return clapack_sgetri(order, N, A, lda, ipiv); +#else + return LAPACKE_sgetri(order, N, A, lda, ipiv); +#endif +} + +template<> +int getri(const CBLAS_ORDER order, const int N, double *A, + const int lda, const int *ipiv) { +#ifdef PADDLE_USE_ATLAS + return clapack_dgetri(order, N, A, lda, ipiv); +#else + return LAPACKE_dgetri(order, N, A, lda, ipiv); +#endif +} + template<> void axpy(const int n, const float alpha, const float* x, float* y) { cblas_saxpy(n, alpha, x, 1, y, 1); diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h index 43075977dc9ce..cad0e4740b8c1 100644 --- a/paddle/math/MathFunctions.h +++ b/paddle/math/MathFunctions.h @@ -21,6 +21,13 @@ limitations under the License. 
*/ extern "C" { #include } +#ifdef PADDLE_USE_ATLAS +extern "C" { +#include +} +#else +#include +#endif #endif #include @@ -34,6 +41,14 @@ void gemm(const CBLAS_TRANSPOSE transA, const CBLAS_TRANSPOSE transB, const T* B, const int ldb, const T beta, T* C, const int ldc); +template +int getrf(const CBLAS_ORDER Order, const int M, const int N, + T *A, const int lda, int *ipiv); + +template +int getri(const CBLAS_ORDER Order, const int N, T *A, + const int lda, const int *ipiv); + template void axpy(const int n, const T alpha, const T* x, T* y); diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index aaeae98f0d28b..d901ba93492ac 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -335,6 +335,30 @@ void GpuMatrix::transpose(MatrixPtr matTrans, bool memAlloc) { hl_matrix_transpose(data, dataTrans, height_, width_, lda, ldc); } + +MatrixPtr GpuMatrix::getInverse() { + MatrixPtr matInv; + inverse(matInv, true); + return matInv; +} + +void GpuMatrix::inverse(MatrixPtr matInv, bool memAlloc) { + CHECK_EQ(height_, width_); + + if (memAlloc) { + matInv = std::make_shared(height_, width_); + } else { + CHECK(matInv != NULL); + } + + real* data = getData(); + real* dataInv = matInv->getData(); + int lda = getStride(); + int ldc = matInv->getStride(); + + hl_matrix_inverse(data, dataInv, height_, lda, ldc); +} + void GpuMatrix::addBias(Matrix& b, real scale) { CHECK(b.getHeight() == 1) << "the Bias should be a vector"; BaseMatrix::addBias(b, scale); @@ -1437,6 +1461,47 @@ void CpuMatrix::transpose(MatrixPtr matTrans, bool memAlloc) { } } + +MatrixPtr CpuMatrix::getInverse() { + MatrixPtr matInv; + inverse(matInv, true); + return matInv; +} + +void CpuMatrix::inverse(MatrixPtr matInv, bool memAlloc) { + CHECK_EQ(height_, width_); + + if (memAlloc) { + matInv = std::make_shared(height_, width_); + } else { + CHECK(matInv != NULL); + } + + CHECK_EQ(height_, matInv->getHeight()); + CHECK_EQ(width_, matInv->getWidth()); + matInv->copyFrom(*this); + + real* data = getData(); + real* dataInv = matInv->getData(); + int ldc = matInv->getStride(); + + if (height_ == 1) { + CHECK_NE(*data, 0); + *dataInv = 1.0 / (*data); + return; + } + + /* Compute the LU decomposition of the matrix */ + std::vector ipiv(height_); + CBLAS_ORDER order = (matInv->isTransposed() ? CblasColMajor : CblasRowMajor); + int info = getrf(order, height_, height_, dataInv, ldc, ipiv.data()); + CHECK_EQ(info, 0); + + /* Compute the inverse of the matrix given its LU decompsotion */ + info = getri(order, height_, dataInv, ldc, ipiv.data()); + CHECK_EQ(info, 0); +} + void CpuMatrix::convExpand(Matrix& feature, int feaImgHeight, int feaImgWidth, int channels, int blockH, int blockW, int strideH, int strideW, int paddingH, int paddingW, diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 52cbed528ca8b..293d13f4d6d5a 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -328,6 +328,20 @@ class Matrix : public BaseMatrix { LOG(FATAL) << "Not implemented"; } + virtual MatrixPtr getInverse() { + LOG(FATAL) << "Not implemented"; + } + + /** + * @brief inverse. + * + * if allocate matInv's memory outside, then set memAlloc as false; + * else set as true. + */ + virtual void inverse(MatrixPtr matInv, bool memAlloc) { + LOG(FATAL) << "Not implemented"; + } + public: /// Only set all variables to 0 or NULL but not free them. 
virtual void clear() { @@ -1043,6 +1057,9 @@ class GpuMatrix : public Matrix { MatrixPtr getTranspose(); void transpose(MatrixPtr matTrans, bool memAlloc); + MatrixPtr getInverse(); + void inverse(MatrixPtr matInv, bool memAlloc); + /// add b to each sample of this. void addBias(Matrix& b, real scale); void addSharedBias(Matrix& b, real scale); @@ -1282,6 +1299,9 @@ class CpuMatrix : public Matrix { MatrixPtr getTranspose(); void transpose(MatrixPtr matTrans, bool memAlloc); + MatrixPtr getInverse(); + void inverse(MatrixPtr matInv, bool memAlloc); + void copyFrom(const Matrix& src); void copyFrom(const Matrix& src, hl_stream_t stream); diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index 0ddf7e0dfc386..b887cccaaa14e 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -641,9 +641,32 @@ void testMatrixTranspose(int height, int width) { MatrixCheckEqual(*cpuT, *outputCheck); } +void testMatrixInverse(int height) { + MatrixPtr cpu = std::make_shared(height, height); + MatrixPtr gpu = std::make_shared(height, height); + MatrixPtr cpuI = std::make_shared(height, height); + MatrixPtr gpuI = std::make_shared(height, height); + + cpu->randomizeUniform(); + gpu->copyFrom(*cpu); + cpu->inverse(cpuI, false); + gpu->inverse(gpuI, false); + + MatrixPtr outputCheck = std::make_shared(height, height); + outputCheck->copyFrom(*gpuI); + MatrixCheckErr(*cpuI, *outputCheck); + + outputCheck->mul(cpu, cpuI); + cpu->zeroMem(); + for (int i = 0; i < height; i++) { + cpu->getRowBuf(i)[i] = 1.0; + } + MatrixCheckErr(*cpu, *outputCheck); +} + TEST(Matrix, unary) { - for (auto height : {1, 11, 73, 128, 200, 330}) { - for (auto width : {1, 32, 100, 512, 1000, 3210}) { + for (auto height : {1, 3, 11, 73, 128, 200, 330}) { + for (auto width : {1, 3, 32, 100, 512, 1000, 3210}) { VLOG(3) << " height=" << height << " width=" << width; // applyUnary @@ -675,6 +698,8 @@ TEST(Matrix, unary) { // transpose testMatrixTranspose(height, width); } + // inverse + testMatrixInverse(height); } } From 1de75c039fb7e6c9bb433f91c574d264eb397f9c Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Fri, 4 Nov 2016 00:40:37 +0800 Subject: [PATCH 069/180] report error when use parallel_nn to train recurrent_nn model (#335) --- paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp index 952df60a7d786..22698f5867017 100644 --- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp +++ b/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp @@ -28,6 +28,12 @@ void ParallelNeuralNetwork::init( const std::vector& parameterTypes, bool useGpu) { NeuralNetwork::init(config, callback, parameterTypes, useGpu); + if (config.type() == "recurrent_nn") { + LOG(FATAL) + << "You can not add `--parallel_nn=true` on the command line, " + << "parallel_nn training mode does not support the recurrent_nn model."; + } + useGpu_ = useGpu; numDevices_ = 0; if (useGpu_) { From f6f80b93788ad6ba56158df070f007c6ccef28aa Mon Sep 17 00:00:00 2001 From: emailweixu Date: Thu, 3 Nov 2016 19:35:18 -0700 Subject: [PATCH 070/180] install the right python package version (#340) For multiple installation of paddle, there might be multiple versions of python package at opt/paddle/share/wheels/. We should install the right version. Ideally, we should remove the wrong versions when install. 
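Beyond the unit test above, a short usage sketch may make the calling convention of the matrix-inverse interface from patch 068/180 clearer. The code below is illustrative only and is not part of any patch in this series; the function name inverseExample and the choice of a random 8x8 matrix are assumptions for the example, while CpuMatrix, randomizeUniform() and inverse(matInv, memAlloc) are the APIs shown in the diffs above. It pre-allocates the output and passes memAlloc = false, mirroring testMatrixInverse().

.. code-block:: cpp

    // A minimal sketch (not part of the patches above): invert a small square
    // CpuMatrix with the interface introduced by the matrix-inverse patch.
    #include <memory>
    #include "paddle/math/Matrix.h"

    void inverseExample() {
      const size_t n = 8;
      paddle::MatrixPtr a = std::make_shared<paddle::CpuMatrix>(n, n);
      paddle::MatrixPtr aInv = std::make_shared<paddle::CpuMatrix>(n, n);

      a->randomizeUniform();    // random entries; assumed non-singular for this sketch
      a->inverse(aInv, false);  // false: the caller pre-allocated the output buffer

      // a multiplied by aInv should now be close to the identity matrix,
      // which is what testMatrixInverse() verifies for both CPU and GPU.
    }

The same call works for a GpuMatrix, where the implementation goes through the new hl_matrix_inverse() wrapper around the cuBLAS getrf/getri routines.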
But it's not easy to do this with cmake. Change-Id: Ida8a8d60643ad9e42cf1c85776de9122d5ba1392 From ed83a1d6b68d2bd1f573a0c4cabf811e9c32aead Mon Sep 17 00:00:00 2001 From: wangkuiyi Date: Thu, 3 Nov 2016 22:15:20 -0700 Subject: [PATCH 071/180] Fix minor errors in instructions of building Paddle on Mac OS X (#347) --- doc/build/build_from_source.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md index 7727c8c3788b9..c37234d3ef14d 100644 --- a/doc/build/build_from_source.md +++ b/doc/build/build_from_source.md @@ -219,10 +219,9 @@ easy_install pip # Install google test on Mac OS X # Download gtest 1.7.0 wget https://github.com/google/googletest/archive/release-1.7.0.tar.gz - tar -xvf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0 + tar -xzf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0 # Build gtest - mkdir build && cmake .. - make + mkdir build && cd build && cmake .. && make # Install gtest library sudo cp -r ../include/gtest /usr/local/include/ sudo cp lib*.a /usr/local/lib From 3424a4c0d898820442b648c31b510403fea7c994 Mon Sep 17 00:00:00 2001 From: gangliao Date: Thu, 3 Nov 2016 22:39:39 -0700 Subject: [PATCH 072/180] Fix bug and redundant code in hl_dso_loader.cc (#306) --- paddle/cuda/src/hl_cuda_cudnn.cc | 73 ++++++++----------------------- paddle/cuda/src/hl_cuda_device.cc | 31 +++---------- paddle/cuda/src/hl_dso_loader.cc | 4 +- 3 files changed, 27 insertions(+), 81 deletions(-) diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/cuda/src/hl_cuda_cudnn.cc index 7810d0d10053d..92b28e4345c3d 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/cuda/src/hl_cuda_cudnn.cc @@ -41,65 +41,28 @@ void* cudnn_dso_handle = nullptr; #ifdef PADDLE_USE_DSO -#define DYNAMIC_LOAD_CUDNN_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - cudnnStatus_t operator()(Args... args) { \ - typedef cudnnStatus_t (*cudnnFunc)(Args...); \ - std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, \ - &cudnn_dso_handle); \ - void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ - } \ +#define DYNAMIC_LOAD_CUDNN_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using cudnn_func = decltype(__name(args...))(*)(Args...); \ + std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, \ + &cudnn_dso_handle); \ + void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ + return reinterpret_cast(p_##__name)(args...); \ + } \ } __name; /* struct DynLoad__##__name */ -struct DynLoad__cudnnGetVersion { - template - size_t operator()(Args... args) { - typedef size_t (*cudnnFunc)(Args...); - std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, - &cudnn_dso_handle); - void* p_name = dlsym(cudnn_dso_handle, "cudnnGetVersion"); - return reinterpret_cast(p_name)(args...); - } -} cudnnGetVersion; /* struct DynLoad__##__name */ - -struct DynLoad__cudnnGetErrorString { - template - const char* operator()(Args... args) { - typedef const char* (*cudnnFunc)(Args...); - std::call_once(cudnn_dso_flag, GetCudnnDsoHandle, - &cudnn_dso_handle); - void* p_name = dlsym(cudnn_dso_handle, "cudnnGetErrorString"); - return reinterpret_cast(p_name)(args...); - } -} cudnnGetErrorString; /* struct DynLoad__##__name */ - - #else -#define DYNAMIC_LOAD_CUDNN_WRAP(__name) \ - struct DynLoad__##__name { \ - template \ - cudnnStatus_t operator()(Args... 
args) { \ - return __name(args...); \ - } \ +#define DYNAMIC_LOAD_CUDNN_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + return __name(args...); \ + } \ } __name; /* struct DynLoad__##__name */ -struct DynLoad__cudnnGetVersion { - template - size_t operator()(Args... args) { - return cudnnGetVersion(args...); - } -} cudnnGetVersion; /* struct DynLoad__##__name */ - -struct DynLoad__cudnnGetErrorString { - template - const char* operator()(Args... args) { - return cudnnGetErrorString(args...); - } -} cudnnGetErrorString; /* struct DynLoad__##__name */ - #endif /** @@ -133,7 +96,9 @@ struct DynLoad__cudnnGetErrorString { __macro(cudnnPoolingForward) \ __macro(cudnnPoolingBackward) \ __macro(cudnnSoftmaxBackward) \ - __macro(cudnnSoftmaxForward) + __macro(cudnnSoftmaxForward) \ + __macro(cudnnGetVersion) \ + __macro(cudnnGetErrorString) CUDNN_DNN_ROUTINE_EACH(DYNAMIC_LOAD_CUDNN_WRAP) #define CUDNN_DNN_ROUTINE_EACH_R2(__macro) \ diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/cuda/src/hl_cuda_device.cc index e9fe9f1c117a0..3ea2c91bd5a41 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/cuda/src/hl_cuda_device.cc @@ -85,44 +85,24 @@ void* cudart_dso_handle = nullptr; #define DYNAMIC_LOAD_CUDART_WRAP(__name) \ struct DynLoad__##__name { \ template \ - cudaError_t operator()(Args... args) { \ - typedef cudaError_t (*cudartFunc)(Args...); \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using cudart_func = decltype(__name(args...))(*)(Args...); \ std::call_once(cudart_dso_flag, GetCudartDsoHandle, \ &cudart_dso_handle); \ void* p_##__name = dlsym(cudart_dso_handle, #__name); \ - return reinterpret_cast(p_##__name)(args...); \ + return reinterpret_cast(p_##__name)(args...); \ } \ } __name; /* struct DynLoad__##__name */ #else #define DYNAMIC_LOAD_CUDART_WRAP(__name) \ struct DynLoad__##__name { \ template \ - cudaError_t operator()(Args... args) { \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ return __name(args...); \ } \ } __name; /* struct DynLoad__##__name */ #endif -#ifdef PADDLE_USE_DSO - struct DynLoad__cudaGetErrorString { - template - const char* operator()(Args... args) { - typedef const char* (*cudaFunc)(Args...); - std::call_once(cudart_dso_flag, GetCudartDsoHandle, - &cudart_dso_handle); - void* p_func = dlsym(cudart_dso_handle, "cudaGetErrorString"); - return reinterpret_cast(p_func)(args...); - } - } cudaGetErrorString; /* struct DynLoad__cudaGetErrorString */ -#else -struct DynLoad__cudaGetErrorString { - template - const char* operator()(Args... 
args) { - return cudaGetErrorString(args...); - } -} cudaGetErrorString; /* struct DynLoad__cudaGetErrorString */ -#endif - /* include all needed cuda functions in HPPL */ #define CUDA_ROUTINE_EACH(__macro) \ __macro(cudaMalloc) \ @@ -152,7 +132,8 @@ struct DynLoad__cudaGetErrorString { __macro(cudaSetDeviceFlags) \ __macro(cudaGetLastError) \ __macro(cudaFuncSetCacheConfig) \ - __macro(cudaRuntimeGetVersion) + __macro(cudaRuntimeGetVersion) \ + __macro(cudaGetErrorString) CUDA_ROUTINE_EACH(DYNAMIC_LOAD_CUDART_WRAP) diff --git a/paddle/cuda/src/hl_dso_loader.cc b/paddle/cuda/src/hl_dso_loader.cc index 91c60d85a1e41..c0b5d6e357fc7 100644 --- a/paddle/cuda/src/hl_dso_loader.cc +++ b/paddle/cuda/src/hl_dso_loader.cc @@ -49,14 +49,14 @@ static inline std::string join(const std::string& part1, const std::string& part static inline void GetDsoHandleFromDefaultPath( std::string& dso_path, void** dso_handle, int dynload_flags) { LOG(INFO) << "Try to find cuda library: " << dso_path - << "from default system path."; + << " from default system path."; // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH *dso_handle = dlopen(dso_path.c_str(), dynload_flags); // DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to // bring System Integrity Projection (SIP), if dso_handle // is null, search from default package path in Mac OS. - #if defined(__APPLE__) or defined(__OSX__) + #if defined(__APPLE__) || defined(__OSX__) if (nullptr == *dso_handle) { dso_path = join("/usr/local/cuda/lib/", dso_path); *dso_handle = dlopen(dso_path.c_str(), dynload_flags); From 33004ecfb75d15a91eedb9ec7a5bc182ff96c6f8 Mon Sep 17 00:00:00 2001 From: gangliao Date: Fri, 4 Nov 2016 00:43:24 -0700 Subject: [PATCH 073/180] Fix glog check type unmatch in Util.cpp (#353) * Fix glog check type unmatch in Util.cpp #352 --- paddle/utils/Util.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp index 45251213d2d79..2cdff9d1aca92 100644 --- a/paddle/utils/Util.cpp +++ b/paddle/utils/Util.cpp @@ -106,7 +106,7 @@ pid_t getTID() { #endif pid_t tid = syscall(__NR_gettid); #endif - CHECK_NE(tid, -1); + CHECK_NE((int)tid, -1); return tid; } From 3e2dc77cc715c4b5f78a42f87e3800de0a0f3623 Mon Sep 17 00:00:00 2001 From: gangliao Date: Fri, 4 Nov 2016 02:52:53 -0700 Subject: [PATCH 074/180] Add code coverage and coveralls (#296) --- .travis.yml | 2 + CMakeLists.txt | 4 + README.md | 8 +- cmake/coveralls.cmake | 103 ++++++ cmake/coverallsGcovJsons.cmake | 403 ++++++++++++++++++++++++ paddle/scripts/travis/build_and_test.sh | 8 +- 6 files changed, 522 insertions(+), 6 deletions(-) create mode 100644 cmake/coveralls.cmake create mode 100644 cmake/coverallsGcovJsons.cmake diff --git a/.travis.yml b/.travis.yml index bf0e0b7bbddd4..7812ac0283789 100644 --- a/.travis.yml +++ b/.travis.yml @@ -35,6 +35,8 @@ addons: - libgoogle-glog-dev - libgflags-dev - libgtest-dev + - curl + - lcov - graphviz before_install: - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi diff --git a/CMakeLists.txt b/CMakeLists.txt index 527064e31000a..93cdcd4a75b70 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -40,6 +40,9 @@ option(WITH_TESTING "Compile and run unittest for PaddlePaddle" ${GTEST_FOUND}) option(WITH_DOC "Compile PaddlePaddle with documentation" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with py PaddlePaddle prediction api" ${SWIG_FOUND}) option(ON_TRAVIS "Running test on travis-ci or not." 
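The wrapper refactor in patch 072/180 above replaces the hand-written DynLoad__cudnnGetVersion, DynLoad__cudnnGetErrorString and DynLoad__cudaGetErrorString structs with the common macros, whose return type is now deduced via decltype. A tiny self-contained sketch of that idea, stripped of the macros and of dlopen/dlsym, may make the change easier to follow; someCApi and everything else below are assumptions made for the example and do not correspond to a real cuDNN or CUDA symbol.

.. code-block:: cpp

    // A minimal sketch (not part of the patches above) of the decltype-based
    // wrapper pattern: the return type is deduced from the wrapped function,
    // so one wrapper shape covers cudnnStatus_t, const char*, size_t, ...
    #include <cstdio>

    // Stand-in for a dynamically loaded C API entry point (an assumption for
    // this sketch only; the real code would resolve it with dlsym()).
    const char* someCApi(int code) { return code == 0 ? "OK" : "ERROR"; }

    struct DynLoad_someCApi {
      template <typename... Args>
      auto operator()(Args... args) -> decltype(someCApi(args...)) {
        using func_t = decltype(someCApi(args...)) (*)(Args...);
        func_t fp = &someCApi;  // the patched macros obtain this pointer via dlsym()
        return fp(args...);
      }
    };

    int main() {
      DynLoad_someCApi wrapped;
      std::printf("%s\n", wrapped(0));  // prints "OK"
      return 0;
    }

Because the return type is deduced per call, the special-cased wrappers for functions that do not return cudnnStatus_t or cudaError_t are no longer needed, which is what lets the patch fold cudnnGetVersion, cudnnGetErrorString and cudaGetErrorString back into the shared macro lists.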
OFF) +option(ON_COVERALLS "Generating code coverage data on coveralls or not." OFF) +option(COVERALLS_UPLOAD "Uploading the generated coveralls json." ON) + if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel" @@ -54,6 +57,7 @@ include(flags) include(cudnn) include(FindPythonModule) include(check_packages) +include(coveralls) # add PaddlePaddle version if(DEFINED ENV{PADDLE_VERSION}) diff --git a/README.md b/README.md index 1cc0444c0617a..66767d7ff8e4a 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ # PaddlePaddle -| **`Linux`** | **`License`** | **`Chat Room`** | -|----------------|---------------|-----------------| -|[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle)|[![License](https://img.shields.io/badge/license-Apache%202.0-green.svg)](LICENSE)|[![Join the chat at https://gitter.im/PaddlePaddle/Deep_Learning](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/PaddlePaddle/Deep_Learning?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)| + +[![Build Status](https://travis-ci.org/baidu/Paddle.svg?branch=master)](https://travis-ci.org/baidu/Paddle) +[![Coverage Status](https://coveralls.io/repos/github/baidu/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/baidu/Paddle?branch=develop) +[![Join the chat at https://gitter.im/PaddlePaddle/Deep_Learning](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/PaddlePaddle/Deep_Learning?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![License](https://img.shields.io/badge/license-Apache%202.0-green.svg)](LICENSE) Welcome to the PaddlePaddle GitHub. diff --git a/cmake/coveralls.cmake b/cmake/coveralls.cmake new file mode 100644 index 0000000000000..9be7643819efd --- /dev/null +++ b/cmake/coveralls.cmake @@ -0,0 +1,103 @@ +# CMake script for code coverage. +# If _COVERALLS_UPLOAD is ON, it will upload json files to overalls.io automatically. + +# Param _COVERAGE_SRCS A list of coverage source files. +# Param _COVERALLS_UPLOAD Upload the result to coveralls. +# Param _CMAKE_SCRIPT_PATH CMake script path. +function(code_coverage _COVERAGE_SRCS _COVERALLS_UPLOAD _CMAKE_SCRIPT_PATH) + # clean previous gcov data. + file(REMOVE_RECURSE ${PROJECT_BINARY_DIR}/*.gcda) + + # find curl for upload JSON soon. + if (_COVERALLS_UPLOAD) + find_program(CURL_EXECUTABLE curl) + if (NOT CURL_EXECUTABLE) + message(FATAL_ERROR "Coveralls: curl not found!") + endif() + endif() + + # When passing a CMake list to an external process, the list + # will be converted from the format "1;2;3" to "1 2 3". + set(COVERAGE_SRCS "") + foreach (SINGLE_SRC ${_COVERAGE_SRCS}) + set(COVERAGE_SRCS "${COVERAGE_SRCS}*${SINGLE_SRC}") + endforeach() + + # query number of logical cores + cmake_host_system_information(RESULT core_size QUERY NUMBER_OF_LOGICAL_CORES) + # coveralls json file. + set(COVERALLS_FILE ${PROJECT_BINARY_DIR}/coveralls.json) + add_custom_target(coveralls_generate + # Run regress tests. + COMMAND ${CMAKE_CTEST_COMMAND} + -j ${core_size} + --output-on-failure + # Generate Gcov and translate it into coveralls JSON. 
+ COMMAND ${CMAKE_COMMAND} + -DCOVERAGE_SRCS="${COVERAGE_SRCS}" + -DCOVERALLS_OUTPUT_FILE="${COVERALLS_FILE}" + -DCOV_PATH="${PROJECT_BINARY_DIR}" + -DPROJECT_ROOT="${PROJECT_SOURCE_DIR}" + -P "${_CMAKE_SCRIPT_PATH}/coverallsGcovJsons.cmake" + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Coveralls: generating coveralls output..." + ) + + if (_COVERALLS_UPLOAD) + message("COVERALLS UPLOAD: ON") + # Upload the JSON to coveralls. + add_custom_target(coveralls_upload + COMMAND ${CURL_EXECUTABLE} + -S -F json_file=@${COVERALLS_FILE} + https://coveralls.io/api/v1/jobs + DEPENDS coveralls_generate + WORKING_DIRECTORY ${PROJECT_BINARY_DIR} + COMMENT "Coveralls: uploading coveralls output...") + + add_custom_target(coveralls DEPENDS coveralls_upload) + else() + message("COVERALLS UPLOAD: OFF") + add_custom_target(coveralls DEPENDS coveralls_generate) + endif() +endfunction() + +if(ON_COVERALLS) + set(CMAKE_BUILD_TYPE "Debug") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage") + + set(EXCLUDE_DIRS + "demo/" + "build/" + "tests/" + ".test_env/" + ) + + if(WITH_GPU) + file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" ".c" "*.cu") + else() + file(GLOB_RECURSE PADDLE_SOURCES RELATIVE "${PROJECT_SOURCE_DIR}" "*.cpp" "*.cc" "*.c") + endif() + + # exclude trivial files in PADDLE_SOURCES + foreach(EXCLUDE_DIR ${EXCLUDE_DIRS}) + foreach(TMP_PATH ${PADDLE_SOURCES}) + string(FIND ${TMP_PATH} ${EXCLUDE_DIR} EXCLUDE_DIR_FOUND) + if(NOT ${EXCLUDE_DIR_FOUND} EQUAL -1) + list(REMOVE_ITEM PADDLE_SOURCES ${TMP_PATH}) + endif() + endforeach(TMP_PATH) + endforeach() + + # convert to absolute path + set(PADDLE_SRCS "") + foreach(PADDLE_SRC ${PADDLE_SOURCES}) + set(PADDLE_SRCS "${PADDLE_SRCS};${PROJECT_SOURCE_DIR}/${PADDLE_SRC}") + endforeach() + + code_coverage( + "${PADDLE_SRCS}" + ${COVERALLS_UPLOAD} + "${PROJECT_SOURCE_DIR}/cmake" + ) +endif() diff --git a/cmake/coverallsGcovJsons.cmake b/cmake/coverallsGcovJsons.cmake new file mode 100644 index 0000000000000..ae3530c3a0eeb --- /dev/null +++ b/cmake/coverallsGcovJsons.cmake @@ -0,0 +1,403 @@ +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +# +# Copyright (C) 2014 Joakim Söderberg +# +# This is intended to be run by a custom target in a CMake project like this. +# 0. Compile program with coverage support. +# 1. Clear coverage data. (Recursively delete *.gcda in build dir) +# 2. Run the unit tests. +# 3. 
Run this script specifying which source files the coverage should be performed on. +# +# This script will then use gcov to generate .gcov files in the directory specified +# via the COV_PATH var. This should probably be the same as your cmake build dir. +# +# It then parses the .gcov files to convert them into the Coveralls JSON format: +# https://coveralls.io/docs/api +# + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8) + +# Since it's not possible to pass a CMake list properly in the +# "1;2;3" format to an external process, we have replaced the +# ";" with "*", so reverse that here so we get it back into the +# CMake list format. +string(REGEX REPLACE "\\*" ";" COVERAGE_SRCS ${COVERAGE_SRCS}) + +find_program(GCOV_EXECUTABLE gcov) +if (NOT GCOV_EXECUTABLE) + message(FATAL_ERROR "gcov not found! Aborting...") +endif() + +find_package(Git) + +# TODO: Add these git things to the coveralls json. +if (GIT_FOUND) + # Branch. + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref HEAD + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_BRANCH + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + + macro (git_log_format FORMAT_CHARS VAR_NAME) + execute_process( + COMMAND ${GIT_EXECUTABLE} log -1 --pretty=format:%${FORMAT_CHARS} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE ${VAR_NAME} + OUTPUT_STRIP_TRAILING_WHITESPACE + ) + endmacro() + + git_log_format(an GIT_AUTHOR_EMAIL) + git_log_format(ae GIT_AUTHOR_EMAIL) + git_log_format(cn GIT_COMMITTER_NAME) + git_log_format(ce GIT_COMMITTER_EMAIL) + git_log_format(B GIT_COMMIT_MESSAGE) + + message("Git exe: ${GIT_EXECUTABLE}") + message("Git branch: ${GIT_BRANCH}") + message("Git author: ${GIT_AUTHOR_NAME}") + message("Git e-mail: ${GIT_AUTHOR_EMAIL}") + message("Git commiter name: ${GIT_COMMITTER_NAME}") + message("Git commiter e-mail: ${GIT_COMMITTER_EMAIL}") + message("Git commit message: ${GIT_COMMIT_MESSAGE}") + +endif() + +############################# Macros ######################################### + +# +# This macro converts from the full path format gcov outputs: +# +# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov +# +# to the original source file path the .gcov is for: +# +# /path/to/project/root/subdir/the_file.c +# +macro(get_source_path_from_gcov_filename _SRC_FILENAME _GCOV_FILENAME) + + # /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov + # -> + # #path#to#project#root#subdir#the_file.c.gcov + get_filename_component(_GCOV_FILENAME_WEXT ${_GCOV_FILENAME} NAME) + + # #path#to#project#root#subdir#the_file.c.gcov -> /path/to/project/root/subdir/the_file.c + string(REGEX REPLACE "\\.gcov$" "" SRC_FILENAME_TMP ${_GCOV_FILENAME_WEXT}) + string(REGEX REPLACE "\#" "/" SRC_FILENAME_TMP ${SRC_FILENAME_TMP}) + set(${_SRC_FILENAME} "${SRC_FILENAME_TMP}") +endmacro() + +############################################################################## + +# Get the coverage data. +file(GLOB_RECURSE GCDA_FILES "${COV_PATH}" "*.gcda") +message("GCDA files:") + +# Get a list of all the object directories needed by gcov +# (The directories the .gcda files and .o files are found in) +# and run gcov on those. 
+foreach(GCDA ${GCDA_FILES}) + message("Process: ${GCDA}") + message("------------------------------------------------------------------------------") + get_filename_component(GCDA_DIR ${GCDA} PATH) + + # + # The -p below refers to "Preserve path components", + # This means that the generated gcov filename of a source file will + # keep the original files entire filepath, but / is replaced with #. + # Example: + # + # /path/to/project/root/build/CMakeFiles/the_file.dir/subdir/the_file.c.gcda + # ------------------------------------------------------------------------------ + # File '/path/to/project/root/subdir/the_file.c' + # Lines executed:68.34% of 199 + # /path/to/project/root/subdir/the_file.c:creating '#path#to#project#root#subdir#the_file.c.gcov' + # + # If -p is not specified then the file is named only "the_file.c.gcov" + # + execute_process( + COMMAND ${GCOV_EXECUTABLE} -p -o ${GCDA_DIR} ${GCDA} + WORKING_DIRECTORY ${GCDA_DIR} + ) +endforeach() + +# TODO: Make these be absolute path +file(GLOB_RECURSE ALL_GCOV_FILES "${COV_PATH}" "*.gcov") + +# Get only the filenames to use for filtering. +#set(COVERAGE_SRCS_NAMES "") +#foreach (COVSRC ${COVERAGE_SRCS}) +# get_filename_component(COVSRC_NAME ${COVSRC} NAME) +# message("${COVSRC} -> ${COVSRC_NAME}") +# list(APPEND COVERAGE_SRCS_NAMES "${COVSRC_NAME}") +#endforeach() + +# +# Filter out all but the gcov files we want. +# +# We do this by comparing the list of COVERAGE_SRCS filepaths that the +# user wants the coverage data for with the paths of the generated .gcov files, +# so that we only keep the relevant gcov files. +# +# Example: +# COVERAGE_SRCS = +# /path/to/project/root/subdir/the_file.c +# +# ALL_GCOV_FILES = +# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov +# /path/to/project/root/build/#path#to#project#root#subdir#other_file.c.gcov +# +# Result should be: +# GCOV_FILES = +# /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov +# +set(GCOV_FILES "") +#message("Look in coverage sources: ${COVERAGE_SRCS}") +message("\nFilter out unwanted GCOV files:") +message("===============================") + +set(COVERAGE_SRCS_REMAINING ${COVERAGE_SRCS}) + +foreach (GCOV_FILE ${ALL_GCOV_FILES}) + + # + # /path/to/project/root/build/#path#to#project#root#subdir#the_file.c.gcov + # -> + # /path/to/project/root/subdir/the_file.c + get_source_path_from_gcov_filename(GCOV_SRC_PATH ${GCOV_FILE}) + + # Is this in the list of source files? + # TODO: We want to match against relative path filenames from the source file root... + list(FIND COVERAGE_SRCS ${GCOV_SRC_PATH} WAS_FOUND) + + if (NOT WAS_FOUND EQUAL -1) + message("YES: ${GCOV_FILE}") + list(APPEND GCOV_FILES ${GCOV_FILE}) + + # We remove it from the list, so we don't bother searching for it again. + # Also files left in COVERAGE_SRCS_REMAINING after this loop ends should + # have coverage data generated from them (no lines are covered). 
+ list(REMOVE_ITEM COVERAGE_SRCS_REMAINING ${GCOV_SRC_PATH}) + else() + message("NO: ${GCOV_FILE}") + endif() +endforeach() + +# TODO: Enable setting these +set(JSON_SERVICE_NAME "travis-ci") +set(JSON_SERVICE_JOB_ID $ENV{TRAVIS_JOB_ID}) + +set(JSON_TEMPLATE +"{ + \"service_name\": \"\@JSON_SERVICE_NAME\@\", + \"service_job_id\": \"\@JSON_SERVICE_JOB_ID\@\", + \"source_files\": \@JSON_GCOV_FILES\@ +}" +) + +set(SRC_FILE_TEMPLATE +"{ + \"name\": \"\@GCOV_SRC_REL_PATH\@\", + \"source_digest\": \"\@GCOV_CONTENTS_MD5\@\", + \"coverage\": \@GCOV_FILE_COVERAGE\@ + }" +) + +message("\nGenerate JSON for files:") +message("=========================") + +set(JSON_GCOV_FILES "[") + +# Read the GCOV files line by line and get the coverage data. +foreach (GCOV_FILE ${GCOV_FILES}) + + get_source_path_from_gcov_filename(GCOV_SRC_PATH ${GCOV_FILE}) + file(RELATIVE_PATH GCOV_SRC_REL_PATH "${PROJECT_ROOT}" "${GCOV_SRC_PATH}") + + # The new coveralls API doesn't need the entire source (Yay!) + # However, still keeping that part for now. Will cleanup in the future. + file(MD5 "${GCOV_SRC_PATH}" GCOV_CONTENTS_MD5) + message("MD5: ${GCOV_SRC_PATH} = ${GCOV_CONTENTS_MD5}") + + # Loads the gcov file as a list of lines. + # (We first open the file and replace all occurences of [] with _ + # because CMake will fail to parse a line containing unmatched brackets... + # also the \ to escaped \n in macros screws up things.) + # https://public.kitware.com/Bug/view.php?id=15369 + file(READ ${GCOV_FILE} GCOV_CONTENTS) + string(REPLACE "[" "_" GCOV_CONTENTS "${GCOV_CONTENTS}") + string(REPLACE "]" "_" GCOV_CONTENTS "${GCOV_CONTENTS}") + string(REPLACE "\\" "_" GCOV_CONTENTS "${GCOV_CONTENTS}") + file(WRITE ${GCOV_FILE}_tmp "${GCOV_CONTENTS}") + + file(STRINGS ${GCOV_FILE}_tmp GCOV_LINES) + list(LENGTH GCOV_LINES LINE_COUNT) + + # Instead of trying to parse the source from the + # gcov file, simply read the file contents from the source file. + # (Parsing it from the gcov is hard because C-code uses ; in many places + # which also happens to be the same as the CMake list delimeter). + file(READ ${GCOV_SRC_PATH} GCOV_FILE_SOURCE) + + string(REPLACE "\\" "\\\\" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + string(REGEX REPLACE "\"" "\\\\\"" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + string(REPLACE "\t" "\\\\t" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + string(REPLACE "\r" "\\\\r" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + string(REPLACE "\n" "\\\\n" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + # According to http://json.org/ these should be escaped as well. + # Don't know how to do that in CMake however... + #string(REPLACE "\b" "\\\\b" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + #string(REPLACE "\f" "\\\\f" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + #string(REGEX REPLACE "\u([a-fA-F0-9]{4})" "\\\\u\\1" GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}") + + # We want a json array of coverage data as a single string + # start building them from the contents of the .gcov + set(GCOV_FILE_COVERAGE "[") + + set(GCOV_LINE_COUNT 1) # Line number for the .gcov. + set(DO_SKIP 0) + foreach (GCOV_LINE ${GCOV_LINES}) + #message("${GCOV_LINE}") + # Example of what we're parsing: + # Hitcount |Line | Source + # " 8: 26: if (!allowed || (strlen(allowed) == 0))" + string(REGEX REPLACE + "^([^:]*):([^:]*):(.*)$" + "\\1;\\2;\\3" + RES + "${GCOV_LINE}") + + # Check if we should exclude lines using the Lcov syntax. 
+ string(REGEX MATCH "LCOV_EXCL_START" START_SKIP "${GCOV_LINE}") + string(REGEX MATCH "LCOV_EXCL_END" END_SKIP "${GCOV_LINE}") + string(REGEX MATCH "LCOV_EXCL_LINE" LINE_SKIP "${GCOV_LINE}") + + set(RESET_SKIP 0) + if (LINE_SKIP AND NOT DO_SKIP) + set(DO_SKIP 1) + set(RESET_SKIP 1) + endif() + + if (START_SKIP) + set(DO_SKIP 1) + message("${GCOV_LINE_COUNT}: Start skip") + endif() + + if (END_SKIP) + set(DO_SKIP 0) + endif() + + list(LENGTH RES RES_COUNT) + + if (RES_COUNT GREATER 2) + list(GET RES 0 HITCOUNT) + list(GET RES 1 LINE) + list(GET RES 2 SOURCE) + + string(STRIP ${HITCOUNT} HITCOUNT) + string(STRIP ${LINE} LINE) + + # Lines with 0 line numbers are metadata and can be ignored. + if (NOT ${LINE} EQUAL 0) + + if (DO_SKIP) + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}null, ") + else() + # Translate the hitcount into valid JSON values. + if (${HITCOUNT} STREQUAL "#####") + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}0, ") + elseif (${HITCOUNT} STREQUAL "-") + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}null, ") + else() + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}${HITCOUNT}, ") + endif() + endif() + endif() + else() + message(WARNING "Failed to properly parse line (RES_COUNT = ${RES_COUNT}) ${GCOV_FILE}:${GCOV_LINE_COUNT}\n-->${GCOV_LINE}") + endif() + + if (RESET_SKIP) + set(DO_SKIP 0) + endif() + math(EXPR GCOV_LINE_COUNT "${GCOV_LINE_COUNT}+1") + endforeach() + + message("${GCOV_LINE_COUNT} of ${LINE_COUNT} lines read!") + + # Advanced way of removing the trailing comma in the JSON array. + # "[1, 2, 3, " -> "[1, 2, 3" + string(REGEX REPLACE ",[ ]*$" "" GCOV_FILE_COVERAGE ${GCOV_FILE_COVERAGE}) + + # Append the trailing ] to complete the JSON array. + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]") + + # Generate the final JSON for this file. + message("Generate JSON for file: ${GCOV_SRC_REL_PATH}...") + string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON) + + set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ") +endforeach() + +# Loop through all files we couldn't find any coverage for +# as well, and generate JSON for those as well with 0% coverage. +foreach(NOT_COVERED_SRC ${COVERAGE_SRCS_REMAINING}) + + # Loads the source file as a list of lines. + file(STRINGS ${NOT_COVERED_SRC} SRC_LINES) + + set(GCOV_FILE_COVERAGE "[") + set(GCOV_FILE_SOURCE "") + + foreach (SOURCE ${SRC_LINES}) + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}0, ") + + string(REPLACE "\\" "\\\\" SOURCE "${SOURCE}") + string(REGEX REPLACE "\"" "\\\\\"" SOURCE "${SOURCE}") + string(REPLACE "\t" "\\\\t" SOURCE "${SOURCE}") + string(REPLACE "\r" "\\\\r" SOURCE "${SOURCE}") + set(GCOV_FILE_SOURCE "${GCOV_FILE_SOURCE}${SOURCE}\\n") + endforeach() + + # Remove trailing comma, and complete JSON array with ] + string(REGEX REPLACE ",[ ]*$" "" GCOV_FILE_COVERAGE ${GCOV_FILE_COVERAGE}) + set(GCOV_FILE_COVERAGE "${GCOV_FILE_COVERAGE}]") + + # Generate the final JSON for this file. + message("Generate JSON for non-gcov file: ${NOT_COVERED_SRC}...") + string(CONFIGURE ${SRC_FILE_TEMPLATE} FILE_JSON) + set(JSON_GCOV_FILES "${JSON_GCOV_FILES}${FILE_JSON}, ") +endforeach() + +# Get rid of trailing comma. +string(REGEX REPLACE ",[ ]*$" "" JSON_GCOV_FILES ${JSON_GCOV_FILES}) +set(JSON_GCOV_FILES "${JSON_GCOV_FILES}]") + +# Generate the final complete JSON! 
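+# string(CONFIGURE) below expands the @JSON_SERVICE_NAME@, @JSON_SERVICE_JOB_ID@ and
+# @JSON_GCOV_FILES@ placeholders in JSON_TEMPLATE; the result is what gets written
+# to COVERALLS_OUTPUT_FILE.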
+message("Generate final JSON...") +string(CONFIGURE ${JSON_TEMPLATE} JSON) + +file(WRITE "${COVERALLS_OUTPUT_FILE}" "${JSON}") +message("###########################################################################") +message("Generated coveralls JSON containing coverage data:") +message("${COVERALLS_OUTPUT_FILE}") +message("###########################################################################") diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index a73c32344c8ab..54e3320c8c158 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -6,17 +6,19 @@ if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then fi -cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON ${CMAKE_EXTRA} +cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_DOC=OFF -DWITH_TESTING=ON -DON_TRAVIS=ON -DON_COVERALLS=ON ${CMAKE_EXTRA} NPROC=1 if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then NRPOC=`nproc` + make -j $NPROC + make coveralls elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then NPROC=`sysctl -n hw.ncpu` + make -j $NPROC + env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" fi -make -j $NPROC -env CTEST_OUTPUT_ON_FAILURE=1 make test ARGS="-j $NPROC" sudo make install sudo paddle version From 568d9cff1d9afaddffda79a3844475c365c22d06 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 4 Nov 2016 06:13:35 -0500 Subject: [PATCH 075/180] Add Issue template to guide user submit good issue (#354) * Add issue template * Update ISSUE_TEMPLATE.md * Update ISSUE_TEMPLATE.md * Rename * Rename * Typo * Typo * Typo * Typo * Follow comments * Follow comments --- ISSUE_TEMPLATE.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 ISSUE_TEMPLATE.md diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md new file mode 100644 index 0000000000000..66d6b8b4dd9a5 --- /dev/null +++ b/ISSUE_TEMPLATE.md @@ -0,0 +1,13 @@ +Thank you for contributing to PaddlePaddle. Submitting an issue is a great help for us. +Both Chinese and English issues are welcome. + +Before submitting the issue, look over the following criteria before handing your request in. + +- [ ] Was there a similar issue submitted or resolved before? You could search issue in the github. +- [ ] Did you go to the search engine for your question? +- [ ] Is my description of issue clear enough to reproduce this problem? + * If there are some errors occured, we need details about `how do you run your code?`, `what system do you use?`, `Are you using GPU or not?`, etc. + * If you could provide an [asciinema](https://asciinema.org/) record, that's awesome! We could help you solve the problem more quickly. +- [ ] Is my description of issue use the github markdown correctly? + * Please use correct markdown syntax for code, header, etc. + * You can reference [this page](https://guides.github.com/features/mastering-markdown/) for markdown syntax. 
From 6c3a678c9a537a6b37c189490c646790860757c0 Mon Sep 17 00:00:00 2001 From: emailweixu Date: Fri, 4 Nov 2016 10:37:22 -0700 Subject: [PATCH 076/180] Add elementwise math operations (#343) * Add elementwise math operations This allows use to use expressions like: y=log(1+exp(x)) Also added unittests for ActivationFunction * Enforce keyword arguments for non-positional arguments * Add LogActivation to doc --- .../trainer_config_helpers/activations.rst | 7 ++ .../activations/ActivationFunction.cpp | 29 ++++++++ .../gserver/activations/ActivationFunction.h | 2 + paddle/gserver/tests/CMakeLists.txt | 7 ++ paddle/gserver/tests/test_ActivationGrad.cpp | 66 +++++++++++++++++++ python/paddle/trainer/config_parser.py | 10 +-- .../trainer_config_helpers/activations.py | 9 +++ .../default_decorators.py | 9 ++- .../paddle/trainer_config_helpers/layers.py | 2 +- python/paddle/trainer_config_helpers/math.py | 64 ++++++++++++++++++ .../tests/configs/check.md5 | 11 ++-- .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/math_ops.py | 24 +++++++ 13 files changed, 229 insertions(+), 13 deletions(-) create mode 100644 paddle/gserver/tests/test_ActivationGrad.cpp create mode 100644 python/paddle/trainer_config_helpers/math.py create mode 100644 python/paddle/trainer_config_helpers/tests/configs/math_ops.py diff --git a/doc/ui/api/trainer_config_helpers/activations.rst b/doc/ui/api/trainer_config_helpers/activations.rst index c4e14ed779efb..070ed03ab6cc9 100644 --- a/doc/ui/api/trainer_config_helpers/activations.rst +++ b/doc/ui/api/trainer_config_helpers/activations.rst @@ -32,6 +32,13 @@ LinearActivation .. automodule:: paddle.trainer_config_helpers.activations :members: LinearActivation :noindex: + +LogActivation +================== + +.. automodule:: paddle.trainer_config_helpers.activations + :members: LogActivation + :noindex: SquareActivation ================ diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/gserver/activations/ActivationFunction.cpp index 9918d20d9082a..27eed75d4d76c 100644 --- a/paddle/gserver/activations/ActivationFunction.cpp +++ b/paddle/gserver/activations/ActivationFunction.cpp @@ -295,6 +295,7 @@ void forward(Argument& act) { void backward(Argument& act) { act.grad->squareDerivative(*act.in); } END_DEFINE_ACTIVATION(square) + /** * @brief Exponential Activation. * \f[ @@ -307,8 +308,36 @@ void forward(Argument& act) { act.value->exp(*act.value); } void backward(Argument& act) { act.grad->expDerivative(*act.value); } END_DEFINE_ACTIVATION(exponential) +/** + * @brief Logarithm Activation. 
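+ * Note: the backward pass below divides the output gradient by the saved
+ * input (dotDiv), since d/dz log(z) = 1/z.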
+ * \f[ + * f(z) = log(z) + * \f] + */ +BEGIN_DEFINE_ACTIVATION(log) +void forward(Argument& act) { + SetDevice device(act.deviceId); + Matrix::resizeOrCreate(act.in, act.value->getHeight(), act.value->getWidth(), + /* trans */ false, useGpu(act.deviceId)); + + act.in->copyFrom(*act.value); + act.value->log(*act.value); +} + +void backward(Argument& act) { act.grad->dotDiv(*act.grad, *act.in); } +END_DEFINE_ACTIVATION(log) + ActivationFunction* ActivationFunction::create(const std::string& type) { return gActivationRegistrar.createByType(type); } +std::vector ActivationFunction::getAllRegisteredTypes() { + std::vector types; + gActivationRegistrar.forEachType([&](const std::string& type) { + types.push_back(type); + }); + return types; +} + + } // namespace paddle diff --git a/paddle/gserver/activations/ActivationFunction.h b/paddle/gserver/activations/ActivationFunction.h index 29860b4a736c3..c483372256c03 100644 --- a/paddle/gserver/activations/ActivationFunction.h +++ b/paddle/gserver/activations/ActivationFunction.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include +#include namespace paddle { @@ -32,6 +33,7 @@ struct Argument; class ActivationFunction { public: static ActivationFunction* create(const std::string& type); + static std::vector getAllRegisteredTypes(); ActivationFunction() {} diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index ff2abf7697317..26ee2b3aae64a 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -20,6 +20,13 @@ add_unittest_without_exec(test_LayerGrad add_test(NAME test_LayerGrad COMMAND test_LayerGrad) +add_unittest_without_exec(test_ActivationGrad + test_ActivationGrad.cpp + LayerGradUtil.cpp + TestUtil.cpp) +add_test(NAME test_ActivationGrad + COMMAND test_ActivationGrad) + ################## test_Evaluator ####################### add_unittest(test_Evaluator test_Evaluator.cpp diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/gserver/tests/test_ActivationGrad.cpp new file mode 100644 index 0000000000000..2c5d17090dfc7 --- /dev/null +++ b/paddle/gserver/tests/test_ActivationGrad.cpp @@ -0,0 +1,66 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include "paddle/gserver/layers/DataLayer.h" +#include "ModelConfig.pb.h" +#include "paddle/trainer/Trainer.h" + +#include "TestUtil.h" +#include "LayerGradUtil.h" + +using namespace paddle; // NOLINT +using namespace std; // NOLINT + +P_DECLARE_bool(use_gpu); +P_DECLARE_bool(thread_local_rand_use_global_seed); + +void testActivation(const string& act) { + LOG(INFO) << "test activation: " << act; + size_t size = 10; + TestConfig config; + config.biasSize = 0; + config.layerConfig.set_type("addto"); + config.layerConfig.set_size(size); + config.layerConfig.set_active_type(act); + config.inputDefs.push_back({INPUT_DATA, "layer_0", size, 0}); + config.layerConfig.add_inputs(); + for (auto useGpu : {false, true}) { + testLayerGrad(config, + act + "_activation", + 100, + /* trans= */false, + useGpu, + /* useWeight */true); + } +} + +TEST(Activation, activation) { + auto types = ActivationFunction::getAllRegisteredTypes(); + std::set excluded{"sequence_softmax"}; + for (auto type : types) { + if (excluded.count(type)) continue; + testActivation(type); + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + initMain(argc, argv); + FLAGS_thread_local_rand_use_global_seed = true; + srand(1); + return RUN_ALL_TESTS(); +} diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index e9098943165fd..e9038fea8a208 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -2573,8 +2573,9 @@ def __init__( for input in self.inputs: psize += input.calc_bias_size() - self.config.bias_size = psize - self.create_bias_parameter(bias, psize) + if bias: + self.config.bias_size = psize + self.create_bias_parameter(bias, psize) if error_clipping_threshold is not None: self.config.error_clipping_threshold = error_clipping_threshold @@ -2659,8 +2660,9 @@ def __init__( for input in self.inputs: psize += input.calc_bias_size() - self.config.bias_size = psize - self.create_bias_parameter(bias, psize) + if bias: + self.config.bias_size = psize + self.create_bias_parameter(bias, psize) @config_layer('recurrent') class RecurrentLayer(LayerBase): diff --git a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py index 292014519374e..ad5cdc0a0eb13 100644 --- a/python/paddle/trainer_config_helpers/activations.py +++ b/python/paddle/trainer_config_helpers/activations.py @@ -199,3 +199,12 @@ class ExpActivation(BaseActivation): f(z) = e^z. """ def __init__(self): BaseActivation.__init__(self, 'exponential', False) + +class LogActivation(BaseActivation): + """ + Logarithm Activation. + + .. math:: + f(z) = log(z) + """ + def __init__(self): BaseActivation.__init__(self, 'log', False) diff --git a/python/paddle/trainer_config_helpers/default_decorators.py b/python/paddle/trainer_config_helpers/default_decorators.py index b20aebc685fe5..be00f48b457c1 100644 --- a/python/paddle/trainer_config_helpers/default_decorators.py +++ b/python/paddle/trainer_config_helpers/default_decorators.py @@ -13,6 +13,7 @@ # limitations under the License. 
import functools +import inspect from .attrs import ParamAttr from .activations import TanhActivation from paddle.trainer.config_parser import * @@ -37,8 +38,12 @@ def __impl__(func): @functools.wraps(func) def __wrapper__(*args, **kwargs): if len(args) != 0: - logger.warning("please use keyword arguments in paddle config.") - + argspec = inspect.getargspec(func) + num_positional = len(argspec.args) + if argspec.defaults: + num_positional -= len(argspec.defaults) + if not argspec.varargs and len(args) > num_positional: + logger.fatal("Must use keyword arguments for non-positional args") for name in param_names: if not_set_callback(kwargs, name): # Not set kwargs[name] = default_factory(func) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 9a23c02431d18..49f0ff3289db7 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -564,7 +564,7 @@ def __init__(self, name, size, act, bias_attr, layer_attr, self.inputs = [] self.finalized = False - def __add__(self, other): + def __iadd__(self, other): """ + += operator :param other: Other projection. diff --git a/python/paddle/trainer_config_helpers/math.py b/python/paddle/trainer_config_helpers/math.py new file mode 100644 index 0000000000000..e35849b77ac53 --- /dev/null +++ b/python/paddle/trainer_config_helpers/math.py @@ -0,0 +1,64 @@ +# Copyright (c) 2016 Baidu, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .layers import LayerOutput, mixed_layer, identity_projection, \ + slope_intercept_layer +from .attrs import is_compatible_with +from .default_decorators import * +import activations as act + +__all__ = [] + +def register_unary_math_op(op_name, act): + def op(input, name=None): + return mixed_layer(input=[identity_projection(input=input)], + name=name, + act=act) + op = wrap_name_default(op_name)(op) + op.__doc__ = type(act).__doc__ + globals()[op_name] = op + __all__.append(op_name) + +register_unary_math_op('exp', act.ExpActivation()) +register_unary_math_op('log', act.LogActivation()) +register_unary_math_op('abs', act.AbsActivation()) +register_unary_math_op('sigmoid', act.SigmoidActivation()) +register_unary_math_op('tanh', act.TanhActivation()) +register_unary_math_op('square', act.SquareActivation()) + +def add(layeroutput, other): + if is_compatible_with(other, float): + return slope_intercept_layer(input=layeroutput, intercept=other) + assert isinstance(other, LayerOutput) + return mixed_layer(input=[identity_projection(input=layeroutput), + identity_projection(input=other)]) + +LayerOutput.__radd__ = add +LayerOutput.__add__ = add + +def sub(layeroutput, other): + if is_compatible_with(other, float): + return slope_intercept_layer(input=layeroutput, intercept=other) + assert isinstance(other, LayerOutput) + neg = slope_intercept_layer(input=other, slope=-1.0) + return mixed_layer(input=[identity_projection(input=layeroutput), + identity_projection(input=neg)]) + +LayerOutput.__sub__ = sub + +def rsub(layeroutput, other): + neg = slope_intercept_layer(input=layeroutput, slope=-1.0) + return add(neg, other) + +LayerOutput.__rsub__ = rsub diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 72dfdad7bdd40..93d129b765e19 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -1,11 +1,11 @@ 86c0815275a9d5eb902e23c6a592f58a img_layers.protostr a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr 9c038249ec8ff719753a746cdb04c026 layer_activations.protostr -34e04043cbb12931c47fa44ec50eeffc projections.protostr +5913f87b39cee3b2701fa158270aca26 projections.protostr 7334ba0a4544f0623231330fc51d390d shared_fc.protostr -bb8e233b05b8e07f9ed386b7aee4f2c6 shared_lstm.protostr +8b8b6bb128a7dfcc937be86145f53e2f shared_lstm.protostr 6b39e34beea8dfb782bee9bd3dea9eb5 simple_rnn_layers.protostr -f98e79e1630d5eb827c300e64836d269 test_bi_grumemory.protostr +4e78f0ded79f6fefb58ca0c104b57c79 test_bi_grumemory.protostr 0fc1409600f1a3301da994ab9d28b0bf test_cost_layers.protostr 6cd5f28a3416344f20120698470e0a4c test_cost_layers_with_weight.protostr 144bc6d3a509de74115fa623741797ed test_expand_layer.protostr @@ -16,7 +16,8 @@ d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr 5433ed33d4e7414eaf658f2a55946186 test_maxout.protostr 251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr -fded24727338fb8ce44d9951ed8aea08 test_rnn_group.protostr +2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr 67d6fde3afb54f389d0ce4ff14726fe1 test_sequence_pooling.protostr f586a548ef4350ba1ed47a81859a64cb unused_layers.protostr -f937a5a6e7e8864b4d8cf56b0f7c7f44 util_layers.protostr +8122477f4f65244580cec09edc590041 util_layers.protostr +dcd76bebb5f9c755f481c26192917818 math_ops.protostr diff --git 
a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 6a31ceabdf36d..9e23bd1fe2bf5 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -9,7 +9,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -test_maxout test_bi_grumemory) +test_maxout test_bi_grumemory math_ops) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py new file mode 100644 index 0000000000000..fe515b7029336 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py @@ -0,0 +1,24 @@ +from paddle.trainer_config_helpers import * +from paddle.trainer_config_helpers import math + +settings( + batch_size=1000, + learning_rate=1e-5 +) + +x = data_layer(name='data', size=100) +x = math.exp(x) +x = math.log(x) +x = math.abs(x) +x = math.sigmoid(x) +x = math.square(x) +x = math.square(x) +y = 1 + x +y = y + 1 +y = x + y +y = y - x +y = y - 2 +y = 2 - y + +outputs(y) + From 36bda94eb4a686297f234bbb7d46423ab344a60e Mon Sep 17 00:00:00 2001 From: lzhao4ever Date: Fri, 4 Nov 2016 17:02:04 -0700 Subject: [PATCH 077/180] include mkl_lapacke.h (#359) --- paddle/math/MathFunctions.h | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/math/MathFunctions.h b/paddle/math/MathFunctions.h index cad0e4740b8c1..b322bd2bd7194 100644 --- a/paddle/math/MathFunctions.h +++ b/paddle/math/MathFunctions.h @@ -17,6 +17,7 @@ limitations under the License. */ #ifdef PADDLE_USE_MKL #include +#include #else extern "C" { #include From 8ff9aa3d2adb20ea4bafd8b0d9e0ee0411038955 Mon Sep 17 00:00:00 2001 From: gangliao Date: Sat, 5 Nov 2016 00:39:47 -0700 Subject: [PATCH 078/180] Update ISSUE_TEMPLATE.md (#357) --- ISSUE_TEMPLATE.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 66d6b8b4dd9a5..b70d66dc259af 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,13 +1,14 @@ Thank you for contributing to PaddlePaddle. Submitting an issue is a great help for us. Both Chinese and English issues are welcome. +It's hard to solve a problem when important details are missing. Before submitting the issue, look over the following criteria before handing your request in. -- [ ] Was there a similar issue submitted or resolved before? You could search issue in the github. -- [ ] Did you go to the search engine for your question? -- [ ] Is my description of issue clear enough to reproduce this problem? - * If there are some errors occured, we need details about `how do you run your code?`, `what system do you use?`, `Are you using GPU or not?`, etc. - * If you could provide an [asciinema](https://asciinema.org/) record, that's awesome! We could help you solve the problem more quickly. -- [ ] Is my description of issue use the github markdown correctly? - * Please use correct markdown syntax for code, header, etc. - * You can reference [this page](https://guides.github.com/features/mastering-markdown/) for markdown syntax. +- [ ] Was there a similar issue submitted or resolved before ? You could search issue in the github. 
+- [ ] Did you search for your issue with common search engines?
+- [ ] Is my description of the issue clear enough to reproduce this problem?
+    * If some errors occurred, we need details about `how do you run your code?`, `what system do you use?`, `Are you using GPU or not?`, etc.
+    * If you use an [asciinema](https://asciinema.org/) recording to show what you are doing to make it happen, that's awesome! We could help you solve the problem more quickly.
+- [ ] Does my description of the issue use github markdown correctly?
+    * Please use proper markdown syntax for styling all forms of writing, e.g., source code, error information, etc.
+    * Check out [this page](https://guides.github.com/features/mastering-markdown/) to find out much more about markdown.

From 744dba4abdedd74504c613d408221c075acc8cb9 Mon Sep 17 00:00:00 2001
From: backyes 
Date: Sat, 5 Nov 2016 16:01:41 +0800
Subject: [PATCH 079/180] add rdma cmake support (#284)

* add rdma cmake support

* move rdma related code to rdma.cmake
---
 CMakeLists.txt   | 11 +++++--
 cmake/rdma.cmake | 76 ++++++++++++++++++++++++++++++++++++++++++++++++
 cmake/util.cmake | 10 +++++++
 3 files changed, 94 insertions(+), 3 deletions(-)
 create mode 100644 cmake/rdma.cmake

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 93cdcd4a75b70..dd45f49b2f91a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -52,6 +52,9 @@ endif()
 include(enableCXX11)
 include(cpplint)
 include(ccache)
+if(WITH_RDMA)
+  include(rdma)
+endif()
 include(util)
 include(flags)
 include(cudnn)
@@ -133,9 +136,11 @@ else(WITH_PYTHON)
     add_definitions(-DPADDLE_NO_PYTHON)
 endif(WITH_PYTHON)
 
-if(NOT WITH_RDMA)
-    add_definitions(-DPADDLE_DISABLE_RDMA)
-endif()
+if(WITH_RDMA)
+  include_directories("${RDMA_INC_DIR}")
+else(WITH_RDMA)
+  add_definitions(-DPADDLE_DISABLE_RDMA)
+endif(WITH_RDMA)
 
 if(WITH_GLOG)
     add_definitions(-DPADDLE_USE_GLOG)
diff --git a/cmake/rdma.cmake b/cmake/rdma.cmake
new file mode 100644
index 0000000000000..e9a4da79aa92a
--- /dev/null
+++ b/cmake/rdma.cmake
@@ -0,0 +1,76 @@
+# users should download the rdma packages from the subversion repository first
+
+# execute the following instructions to download them via svn manually
+# svn co https://svn.baidu.com/sys/ip/trunk/rdma/sockrdmav1 rdma/
+# svn co https://svn.baidu.com/sys/ip/trunk/rdma/thirdparty rdma/
+# we use the static output in the svn repositories to avoid implicit bugs from a non-standard runtime env.
+
+set(RDMA_ROOT $ENV{RDMA_ROOT} CACHE PATH "Folder contains RDMA sock library and thirdparty library")
+
+function(generate_rdma_links)
+    # redirect to the current DIR to isolate the pollution from the system runtime environment;
+    # it gives unified control over different gcc environments.
+    # e.g., by default gcc48 does not refer to /usr/lib64, which could contain low-version
+    # runtime libraries that will crash the process while loading them. That redirect trick
+    # can fix it.
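+    # The execute_process call below materializes that redirect: it creates a local
+    # ./librdma directory in the build tree and symlinks the system libibverbs and
+    # librdmacm libraries into it; RDMA_LD_FLAGS (set further down) then links with
+    # -L./librdma and an rpath of ./librdma so those redirected copies are the ones used.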
+ execute_process( + COMMAND mkdir -p librdma + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so.1 + COMMAND ln -s -f /usr/lib64/libibverbs.so.1.0.0 librdma/libibverbs.so + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so.1 + COMMAND ln -s -f /usr/lib64/librdmacm.so.1.0.0 librdma/librdmacm.so + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) +endfunction(generate_rdma_links) + + +#check and set headers +find_path(RDMA_INC_SXISOCK sxi_sock.h PATHS ${RDMA_ROOT}/sockrdmav1/output/include) +find_path(RDMA_INC_XIO libxio.h PATHS ${RDMA_ROOT}/thirdparty/output/accelio) +find_path(RDMA_INC_EVENT event2 PATHS ${RDMA_ROOT}/thirdparty/output/libevent) +find_path(RDMA_INC_NUMA numa.h PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) + +#check and set libs +find_library(RDMA_LIB_SXISOCK NAMES sxisock PATHS ${RDMA_ROOT}/sockrdmav1/output) +find_library(RDMA_LIB_XIO NAMES xio PATHS ${RDMA_ROOT}/thirdparty/output/accelio) +find_library(RDMA_LIB_EVENT NAMES event PATHS ${RDMA_ROOT}/thirdparty/output/libevent) +find_library(RDMA_LIB_EVENT_CORE NAMES event_core PATHS ${RDMA_ROOT}/thirdparty/output/libevent) +find_library(RDMA_LIB_EVENT_EXTRA NAMES event_extra PATHS ${RDMA_ROOT}/thirdparty/output/libevent) +find_library(RDMA_LIB_EVENT_PTHREADS NAMES event_pthreads PATHS ${RDMA_ROOT}/thirdparty/output/libevent) +find_library(RDMA_LIB_NUMA NAMES numa PATHS ${RDMA_ROOT}/thirdparty/output/libnuma) + +if( + RDMA_INC_SXISOCK AND + RDMA_INC_XIO AND + RDMA_INC_EVENT AND + RDMA_INC_NUMA AND + RDMA_LIB_SXISOCK AND + RDMA_LIB_XIO AND + RDMA_LIB_EVENT AND + RDMA_LIB_EVENT_CORE AND + RDMA_LIB_EVENT_EXTRA AND + RDMA_LIB_EVENT_PTHREADS AND + RDMA_LIB_NUMA + ) + + set(RDMA_INC_DIR + ${RDMA_INC_SXISOCK} + ${RDMA_INC_XIO} + ${RDMA_INC_EVENT} + ${RDMA_INC_NUMA}) + set(RDMA_LIBS + ${RDMA_LIB_SXISOCK} + ${RDMA_LIB_XIO} + ${RDMA_LIB_EVENT} + ${RDMA_LIB_EVENT_CORE} + ${RDMA_LIB_EVENT_EXTRA} + ${RDMA_LIB_EVENT_PTHREADS} + ${RDMA_LIB_NUMA} + ) + set(RDMA_LD_FLAGS "-L./librdma -libverbs -lrdmacm -Xlinker -rpath ./librdma") + return() +endif() + +#if this module is not called, RDMA_INC_DIR RDMA_LIBS will be null, so top module always refer this variable + +message(FATAL_ERROR, "RDMA libraries are not found, try to set RDMA_ROOT or check all related libraries.") diff --git a/cmake/util.cmake b/cmake/util.cmake index 0fa36f070cc11..3f78cd08c3905 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -67,6 +67,10 @@ endmacro() # # It will handle WITH_PYTHON/WITH_GLOG etc. 
function(link_paddle_exe TARGET_NAME) + if(WITH_RDMA) + generate_rdma_links() + endif() + if(WITH_METRIC) if(WITH_GPU) set(METRIC_LIBS paddle_metric_learning paddle_dserver_lib metric metric_cpu) @@ -109,6 +113,12 @@ function(link_paddle_exe TARGET_NAME) ${ZLIB_LIBRARIES} ${INTERAL_LIBS} ${CMAKE_DL_LIBS}) + + if(WITH_RDMA) + target_link_libraries(${TARGET_NAME} + ${RDMA_LD_FLAGS} + ${RDMA_LIBS}) + endif() if(WITH_PYTHON) target_link_libraries(${TARGET_NAME} From 93e4d0cce6d3048f77aa46dc9d4ee5c3fb777d0c Mon Sep 17 00:00:00 2001 From: backyes Date: Sat, 5 Nov 2016 16:38:11 +0800 Subject: [PATCH 080/180] using find_package for swig (#334) --- CMakeLists.txt | 3 ++- cmake/swig.cmake | 22 ---------------------- 2 files changed, 2 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dd45f49b2f91a..282e3e199ef44 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,7 @@ set(PADDLE_VERSION ${PADDLE_MAJOR_VERSION}.${PADDLE_MINOR_VERSION}.${PADDLE_PATC set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake") set(PROJ_ROOT ${CMAKE_SOURCE_DIR}) include(package) -include(swig) +find_package(SWIG 2.0) find_package(CUDA QUIET) find_package(Protobuf REQUIRED) find_package(PythonLibs 2.7 REQUIRED) @@ -60,6 +60,7 @@ include(flags) include(cudnn) include(FindPythonModule) include(check_packages) +include(swig) include(coveralls) # add PaddlePaddle version diff --git a/cmake/swig.cmake b/cmake/swig.cmake index 160d7ee56a9c6..97e87aa947791 100644 --- a/cmake/swig.cmake +++ b/cmake/swig.cmake @@ -1,25 +1,3 @@ -find_program( - SWIG_BINARY_PATH - swig) - -if(${SWIG_BINARY_PATH} STREQUAL "SWIG_BINARY_PATH-NOTFOUND") - set(SWIG_FOUND OFF) -else() - set(SWIG_FOUND ON) -endif() - -set(MIN_SWIG_VERSION 2) -if(SWIG_FOUND) - execute_process(COMMAND sh -c "${SWIG_BINARY_PATH} -version | grep Version | cut -f3 -d' '" - OUTPUT_VARIABLE _SWIG_VERSION - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(${_SWIG_VERSION} VERSION_LESS ${MIN_SWIG_VERSION}) - message("swig version ${MIN_SWIG_VERSION} or greater is needed for generating python api. " - "Only version ${_SWIG_VERSION} is found. 
Set SWIG_FOUND to FALSE") - set(SWIG_FOUND FALSE) - endif(${_SWIG_VERSION} VERSION_LESS ${MIN_SWIG_VERSION}) -endif(SWIG_FOUND) - function(generate_python_api target_name) add_custom_command(OUTPUT ${PROJ_ROOT}/paddle/py_paddle/swig_paddle.py ${PROJ_ROOT}/paddle/Paddle_wrap.cxx From c64cd6feb4b8f7c052ed8f4934dc0b9059969d84 Mon Sep 17 00:00:00 2001 From: wenboyang Date: Mon, 7 Nov 2016 12:53:41 +0800 Subject: [PATCH 081/180] Use diff to compare config unittest (#363) Fix #342 --- .../tests/configs/check.md5 | 23 - .../tests/configs/generate_protostr.sh | 4 +- .../configs/protostr/img_layers.protostr | 176 +++++ .../configs/protostr/last_first_seq.protostr | 69 ++ .../protostr/layer_activations.protostr | 423 ++++++++++++ .../tests/configs/protostr/math_ops.protostr | 235 +++++++ .../configs/protostr/projections.protostr | 315 +++++++++ .../tests/configs/protostr/shared_fc.protostr | 125 ++++ .../configs/protostr/shared_lstm.protostr | 393 +++++++++++ .../protostr/simple_rnn_layers.protostr | 418 +++++++++++ .../protostr/test_bi_grumemory.protostr | 152 ++++ .../protostr/test_cost_layers.protostr | 289 ++++++++ .../test_cost_layers_with_weight.protostr | 111 +++ .../protostr/test_expand_layer.protostr | 56 ++ .../tests/configs/protostr/test_fc.protostr | 98 +++ .../protostr/test_grumemory_layer.protostr | 51 ++ .../configs/protostr/test_hsigmoid.protostr | 62 ++ .../protostr/test_lstmemory_layer.protostr | 53 ++ .../configs/protostr/test_maxout.protostr | 209 ++++++ .../configs/protostr/test_ntm_layers.protostr | 225 ++++++ .../protostr/test_print_layer.protostr | 26 + .../configs/protostr/test_rnn_group.protostr | 650 ++++++++++++++++++ .../protostr/test_sequence_pooling.protostr | 111 +++ .../configs/protostr/unused_layers.protostr | 27 + .../configs/protostr/util_layers.protostr | 81 +++ .../tests/configs/run_tests.sh | 14 +- 26 files changed, 4371 insertions(+), 25 deletions(-) delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/check.md5 create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/layer_activations.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/shared_fc.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_expand_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_fc.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_grumemory_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_hsigmoid.protostr create mode 
100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_lstmemory_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_maxout.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_ntm_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_print_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/unused_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 deleted file mode 100644 index 93d129b765e19..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ /dev/null @@ -1,23 +0,0 @@ -86c0815275a9d5eb902e23c6a592f58a img_layers.protostr -a5d9259ff1fd7ca23d0ef090052cb1f2 last_first_seq.protostr -9c038249ec8ff719753a746cdb04c026 layer_activations.protostr -5913f87b39cee3b2701fa158270aca26 projections.protostr -7334ba0a4544f0623231330fc51d390d shared_fc.protostr -8b8b6bb128a7dfcc937be86145f53e2f shared_lstm.protostr -6b39e34beea8dfb782bee9bd3dea9eb5 simple_rnn_layers.protostr -4e78f0ded79f6fefb58ca0c104b57c79 test_bi_grumemory.protostr -0fc1409600f1a3301da994ab9d28b0bf test_cost_layers.protostr -6cd5f28a3416344f20120698470e0a4c test_cost_layers_with_weight.protostr -144bc6d3a509de74115fa623741797ed test_expand_layer.protostr -2378518bdb71e8c6e888b1842923df58 test_fc.protostr -8bb44e1e5072d0c261572307e7672bda test_grumemory_layer.protostr -1f3510672dce7a9ed25317fc58579ac7 test_hsigmoid.protostr -d350bd91a0dc13e854b1364c3d9339c6 test_lstmemory_layer.protostr -5433ed33d4e7414eaf658f2a55946186 test_maxout.protostr -251a948ba41c1071afcd3d9cf9c233f7 test_ntm_layers.protostr -e6ff04e70aea27c7b06d808cc49c9497 test_print_layer.protostr -2a75dd33b640c49a8821c2da6e574577 test_rnn_group.protostr -67d6fde3afb54f389d0ce4ff14726fe1 test_sequence_pooling.protostr -f586a548ef4350ba1ed47a81859a64cb unused_layers.protostr -8122477f4f65244580cec09edc590041 util_layers.protostr -dcd76bebb5f9c755f481c26192917818 math_ops.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 9e23bd1fe2bf5..77774f6fcfafd 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -4,6 +4,8 @@ set -e cd `dirname $0` export PYTHONPATH=$PWD/../../../../ +protostr=$PWD/protostr + configs=(test_fc layer_activations projections test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid @@ -15,5 +17,5 @@ test_maxout test_bi_grumemory math_ops) for conf in ${configs[*]} do echo "Generating " $conf - python -m paddle.utils.dump_config $conf.py > $conf.protostr + python -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unitest done diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr new 
file mode 100644 index 0000000000000..1f262af21126c --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_layers.protostr @@ -0,0 +1,176 @@ +type: "nn" +layers { + name: "image" + type: "data" + size: 65536 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconv" + size: 3297856 + active_type: "" + inputs { + input_layer_name: "image" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 32 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 1 + output_x: 227 + img_size: 256 + caffe_mode: true + filter_size_y: 32 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 64 + shared_biases: true +} +layers { + name: "__batch_norm_0__" + type: "batch_norm" + size: 3297856 + active_type: "relu" + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w0" + image_conf { + channels: 64 + img_size: 227 + } + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w1" + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w2" + } + bias_parameter_name: "___batch_norm_0__.wbias" + moving_average_fraction: 0.9 +} +layers { + name: "__crmnorm_0__" + type: "norm" + size: 3297856 + active_type: "" + inputs { + input_layer_name: "__batch_norm_0__" + norm_conf { + norm_type: "cmrnorm-projection" + channels: 64 + size: 32 + scale: 0.0004 + pow: 0.75 + output_x: 227 + img_size: 227 + blocked: false + } + } +} +layers { + name: "__pool_0__" + type: "pool" + size: 2458624 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + pool_conf { + pool_type: "max-projection" + channels: 64 + size_x: 32 + stride: 1 + output_x: 196 + img_size: 227 + padding: 0 + size_y: 32 + stride_y: 1 + output_y: 196 + img_size_y: 227 + padding_y: 0 + } + } +} +parameters { + name: "___conv_0__.w0" + size: 65536 + initial_mean: 0.0 + initial_std: 0.0441941738242 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 64 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w0" + size: 64 + initial_mean: 1.0 + initial_std: 0.0 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w1" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.w2" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "image" +output_layer_names: "__pool_0__" +output_layer_names: "__crmnorm_0__" +sub_models { + name: "root" + layer_names: "image" + layer_names: "__conv_0__" + layer_names: "__batch_norm_0__" + layer_names: "__crmnorm_0__" + layer_names: "__pool_0__" + input_layer_names: "image" + output_layer_names: "__pool_0__" + output_layer_names: "__crmnorm_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr new file mode 100644 index 0000000000000..7b2911f8e367e --- 
/dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/last_first_seq.protostr @@ -0,0 +1,69 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + select_first: true + trans_type: "seq" +} +layers { + name: "__first_seq_1__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + trans_type: "seq" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + trans_type: "non-seq" +} +input_layer_names: "data" +output_layer_names: "__first_seq_0__" +output_layer_names: "__first_seq_1__" +output_layer_names: "__last_seq_0__" +output_layer_names: "__last_seq_1__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__first_seq_0__" + layer_names: "__first_seq_1__" + layer_names: "__last_seq_0__" + layer_names: "__last_seq_1__" + input_layer_names: "data" + output_layer_names: "__first_seq_0__" + output_layer_names: "__first_seq_1__" + output_layer_names: "__last_seq_0__" + output_layer_names: "__last_seq_1__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/layer_activations.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/layer_activations.protostr new file mode 100644 index 0000000000000..ecf39e4d32167 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/layer_activations.protostr @@ -0,0 +1,423 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "layer_0" + type: "fc" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_0.w0" + } + bias_parameter_name: "_layer_0.wbias" +} +layers { + name: "layer_1" + type: "fc" + size: 100 + active_type: "sigmoid" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_1.w0" + } + bias_parameter_name: "_layer_1.wbias" +} +layers { + name: "layer_2" + type: "fc" + size: 100 + active_type: "softmax" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_2.w0" + } + bias_parameter_name: "_layer_2.wbias" +} +layers { + name: "layer_3" + type: "fc" + size: 100 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_3.w0" + } + bias_parameter_name: "_layer_3.wbias" +} +layers { + name: "layer_4" + type: "fc" + size: 100 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_4.w0" + } + bias_parameter_name: "_layer_4.wbias" +} +layers { + name: "layer_5" + type: "fc" + size: 100 + active_type: "exponential" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_5.w0" + } + bias_parameter_name: "_layer_5.wbias" +} +layers { + name: "layer_6" + type: "fc" + size: 100 + active_type: "relu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_6.w0" + } + bias_parameter_name: "_layer_6.wbias" +} +layers { + name: "layer_7" + type: "fc" + size: 100 + active_type: "brelu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_7.w0" + } + bias_parameter_name: "_layer_7.wbias" +} +layers { + name: "layer_8" + type: 
"fc" + size: 100 + active_type: "softrelu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_8.w0" + } + bias_parameter_name: "_layer_8.wbias" +} +layers { + name: "layer_9" + type: "fc" + size: 100 + active_type: "stanh" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_9.w0" + } + bias_parameter_name: "_layer_9.wbias" +} +layers { + name: "layer_10" + type: "fc" + size: 100 + active_type: "abs" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_10.w0" + } + bias_parameter_name: "_layer_10.wbias" +} +layers { + name: "layer_11" + type: "fc" + size: 100 + active_type: "square" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_11.w0" + } + bias_parameter_name: "_layer_11.wbias" +} +parameters { + name: "_layer_0.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_0.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_1.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_1.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_2.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_2.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_3.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_3.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_4.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_4.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_5.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_5.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_6.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_6.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_7.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_7.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_8.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_8.wbias" + size: 100 + 
initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_9.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_9.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_10.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_10.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_11.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_11.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "input" +output_layer_names: "layer_0" +output_layer_names: "layer_1" +output_layer_names: "layer_2" +output_layer_names: "layer_3" +output_layer_names: "layer_4" +output_layer_names: "layer_5" +output_layer_names: "layer_6" +output_layer_names: "layer_7" +output_layer_names: "layer_8" +output_layer_names: "layer_9" +output_layer_names: "layer_10" +output_layer_names: "layer_11" +sub_models { + name: "root" + layer_names: "input" + layer_names: "layer_0" + layer_names: "layer_1" + layer_names: "layer_2" + layer_names: "layer_3" + layer_names: "layer_4" + layer_names: "layer_5" + layer_names: "layer_6" + layer_names: "layer_7" + layer_names: "layer_8" + layer_names: "layer_9" + layer_names: "layer_10" + layer_names: "layer_11" + input_layer_names: "input" + output_layer_names: "layer_0" + output_layer_names: "layer_1" + output_layer_names: "layer_2" + output_layer_names: "layer_3" + output_layer_names: "layer_4" + output_layer_names: "layer_5" + output_layer_names: "layer_6" + output_layer_names: "layer_7" + output_layer_names: "layer_8" + output_layer_names: "layer_9" + output_layer_names: "layer_10" + output_layer_names: "layer_11" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr new file mode 100644 index 0000000000000..1767445c44bf5 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr @@ -0,0 +1,235 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__exp_0__" + type: "mixed" + size: 100 + active_type: "exponential" + inputs { + input_layer_name: "data" + proj_conf { + type: "identity" + name: "___exp_0__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__log_0__" + type: "mixed" + size: 100 + active_type: "log" + inputs { + input_layer_name: "__exp_0__" + proj_conf { + type: "identity" + name: "___log_0__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__abs_0__" + type: "mixed" + size: 100 + active_type: "abs" + inputs { + input_layer_name: "__log_0__" + proj_conf { + type: "identity" + name: "___abs_0__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__sigmoid_0__" + type: "mixed" + size: 100 + active_type: "sigmoid" + inputs { + input_layer_name: "__abs_0__" + proj_conf { + type: 
"identity" + name: "___sigmoid_0__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__square_0__" + type: "mixed" + size: 100 + active_type: "square" + inputs { + input_layer_name: "__sigmoid_0__" + proj_conf { + type: "identity" + name: "___square_0__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__square_1__" + type: "mixed" + size: 100 + active_type: "square" + inputs { + input_layer_name: "__square_0__" + proj_conf { + type: "identity" + name: "___square_1__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__slope_intercept_layer_0__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__square_1__" + } + slope: 1.0 + intercept: 1 +} +layers { + name: "__slope_intercept_layer_1__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__slope_intercept_layer_0__" + } + slope: 1.0 + intercept: 1 +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__square_1__" + proj_conf { + type: "identity" + name: "___mixed_0__.w0" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__slope_intercept_layer_1__" + proj_conf { + type: "identity" + name: "___mixed_0__.w1" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__slope_intercept_layer_2__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__square_1__" + } + slope: -1.0 + intercept: 0.0 +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_0__" + proj_conf { + type: "identity" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__slope_intercept_layer_2__" + proj_conf { + type: "identity" + name: "___mixed_1__.w1" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__slope_intercept_layer_3__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_1__" + } + slope: 1.0 + intercept: 2 +} +layers { + name: "__slope_intercept_layer_4__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__slope_intercept_layer_3__" + } + slope: -1.0 + intercept: 0.0 +} +layers { + name: "__slope_intercept_layer_5__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__slope_intercept_layer_4__" + } + slope: 1.0 + intercept: 2 +} +input_layer_names: "data" +output_layer_names: "__slope_intercept_layer_5__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__exp_0__" + layer_names: "__log_0__" + layer_names: "__abs_0__" + layer_names: "__sigmoid_0__" + layer_names: "__square_0__" + layer_names: "__square_1__" + layer_names: "__slope_intercept_layer_0__" + layer_names: "__slope_intercept_layer_1__" + layer_names: "__mixed_0__" + layer_names: "__slope_intercept_layer_2__" + layer_names: "__mixed_1__" + layer_names: "__slope_intercept_layer_3__" + layer_names: "__slope_intercept_layer_4__" + layer_names: "__slope_intercept_layer_5__" + input_layer_names: "data" + output_layer_names: "__slope_intercept_layer_5__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr new file mode 100644 index 0000000000000..e47e531a2223d --- /dev/null +++ 
b/python/paddle/trainer_config_helpers/tests/configs/protostr/projections.protostr @@ -0,0 +1,315 @@ +type: "nn" +layers { + name: "test" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__embedding_0__" + type: "mixed" + size: 256 + active_type: "" + inputs { + input_layer_name: "test" + input_parameter_name: "___embedding_0__.w0" + proj_conf { + type: "table" + name: "___embedding_0__.w0" + input_size: 100 + output_size: 256 + } + } +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__embedding_0__" + input_parameter_name: "___mixed_0__.w0" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 256 + output_size: 100 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_0__" + input_parameter_name: "___mixed_1__.w0" + proj_conf { + type: "table" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_2__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_1__" + proj_conf { + type: "identity" + name: "___mixed_2__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_3__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_2__" + input_parameter_name: "___mixed_3__.w0" + proj_conf { + type: "dot_mul" + name: "___mixed_3__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_4__" + type: "mixed" + size: 300 + active_type: "" + inputs { + input_layer_name: "__mixed_3__" + input_parameter_name: "___mixed_4__.w0" + proj_conf { + type: "context" + name: "___mixed_4__.w0" + input_size: 100 + output_size: 300 + context_start: -1 + context_length: 3 + trainable_padding: true + } + } +} +layers { + name: "__mixed_5__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_2__" + } + inputs { + input_layer_name: "__mixed_3__" + } + operator_confs { + type: "dot_mul" + input_indices: 0 + input_indices: 1 + input_sizes: 100 + input_sizes: 100 + output_size: 100 + dotmul_scale: 1 + } +} +layers { + name: "img" + type: "data" + size: 1024 + active_type: "" +} +layers { + name: "filter" + type: "data" + size: 576 + active_type: "" +} +layers { + name: "__mixed_6__" + type: "mixed" + size: 57600 + active_type: "" + inputs { + input_layer_name: "img" + } + inputs { + input_layer_name: "filter" + } + operator_confs { + type: "conv" + input_indices: 0 + input_indices: 1 + input_sizes: 1024 + input_sizes: 576 + output_size: 57600 + conv_conf { + filter_size: 3 + channels: 1 + stride: 1 + padding: 0 + groups: 1 + filter_channels: 1 + output_x: 30 + img_size: 32 + caffe_mode: true + filter_size_y: 3 + padding_y: 0 + stride_y: 1 + } + num_filters: 64 + } +} +layers { + name: "__mixed_7__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_4__" + input_parameter_name: "___mixed_7__.w0" + proj_conf { + type: "fc" + name: "___mixed_7__.w0" + input_size: 300 + output_size: 100 + } + } + inputs { + input_layer_name: "__mixed_5__" + input_parameter_name: "___mixed_7__.w1" + proj_conf { + type: "trans_fc" + name: "___mixed_7__.w1" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__mixed_6__" + input_parameter_name: "___mixed_7__.w2" + proj_conf { + type: "fc" + name: "___mixed_7__.w2" + input_size: 57600 + output_size: 100 + } + } + drop_rate: 0.5 +} +parameters { + name: "___embedding_0__.w0" + 
size: 25600 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 256 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_0__.w0" + size: 25600 + initial_mean: 0.0 + initial_std: 0.0625 + dims: 256 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_1__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_3__.w0" + size: 100 + initial_mean: 0.0 + initial_std: 1.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_4__.w0" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 2 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___mixed_7__.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.057735026919 + dims: 300 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_7__.w1" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_7__.w2" + size: 5760000 + initial_mean: 0.0 + initial_std: 0.00416666666667 + dims: 57600 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "test" +input_layer_names: "img" +input_layer_names: "filter" +output_layer_names: "__mixed_7__" +sub_models { + name: "root" + layer_names: "test" + layer_names: "__embedding_0__" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__mixed_2__" + layer_names: "__mixed_3__" + layer_names: "__mixed_4__" + layer_names: "__mixed_5__" + layer_names: "img" + layer_names: "filter" + layer_names: "__mixed_6__" + layer_names: "__mixed_7__" + input_layer_names: "test" + input_layer_names: "img" + input_layer_names: "filter" + output_layer_names: "__mixed_7__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_fc.protostr new file mode 100644 index 0000000000000..3e8633b079831 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_fc.protostr @@ -0,0 +1,125 @@ +type: "nn" +layers { + name: "feature_a" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "feature_b" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "feature_a" + input_parameter_name: "fc_param" + } + bias_parameter_name: "bias_param" +} +layers { + name: "__fc_layer_1__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "feature_b" + input_parameter_name: "fc_param" + } + bias_parameter_name: "bias_param" +} +layers { + name: "__fc_layer_2__" + type: "fc" + size: 10 + active_type: "softmax" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "softmax_param" + } + inputs { + input_layer_name: "__fc_layer_1__" + input_parameter_name: "softmax_param" + } +} +layers { + name: "label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__cost_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_2__" + } + inputs { + input_layer_name: "label" + } + coeff: 1.0 +} +parameters { + name: "fc_param" + size: 40000 + initial_mean: 0.0 + initial_std: 1.0 + dims: 200 + dims: 200 + initial_strategy: 1 + initial_smart: 
false +} +parameters { + name: "bias_param" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "softmax_param" + size: 2000 + initial_mean: 0.0 + initial_std: 1.0 + dims: 200 + dims: 10 + initial_strategy: 1 + initial_smart: false +} +input_layer_names: "feature_a" +input_layer_names: "feature_b" +input_layer_names: "label" +output_layer_names: "__cost_0__" +evaluators { + name: "classification_error_evaluator" + type: "classification_error" + input_layers: "__fc_layer_2__" + input_layers: "label" +} +sub_models { + name: "root" + layer_names: "feature_a" + layer_names: "feature_b" + layer_names: "__fc_layer_0__" + layer_names: "__fc_layer_1__" + layer_names: "__fc_layer_2__" + layer_names: "label" + layer_names: "__cost_0__" + input_layer_names: "feature_a" + input_layer_names: "feature_b" + input_layer_names: "label" + output_layer_names: "__cost_0__" + evaluator_names: "classification_error_evaluator" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr new file mode 100644 index 0000000000000..0a83499b72480 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/shared_lstm.protostr @@ -0,0 +1,393 @@ +type: "recurrent_nn" +layers { + name: "data_a" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "data_b" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "data_a" + input_parameter_name: "mixed_param" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "data_b" + input_parameter_name: "mixed_param" + proj_conf { + type: "fc" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_0__@__lstm_group_0___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_0___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + input_parameter_name: "lstm_param" + proj_conf { + type: "fc" + name: "___lstm_group_0___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + } + bias_parameter_name: "lstm_bias" + active_gate_type: "sigmoid" + 
active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_1__@__lstm_group_1___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "__mixed_1__@__lstm_group_1___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_1___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + input_parameter_name: "lstm_param" + proj_conf { + type: "fc" + name: "___lstm_group_1___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + } + bias_parameter_name: "lstm_bias" + active_gate_type: "sigmoid" + active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_1__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_0__" + } + trans_type: "non-seq" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_1__" + } + trans_type: "non-seq" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 10 + active_type: "softmax" + inputs { + input_layer_name: "__last_seq_0__" + input_parameter_name: "softmax_param" + } + inputs { + input_layer_name: "__last_seq_1__" + input_parameter_name: "softmax_param" + } +} +layers { + name: "label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__cost_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + coeff: 1.0 +} +parameters { + name: "mixed_param" + size: 40000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "lstm_param" + size: 40000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "lstm_bias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 
0 + initial_smart: false +} +parameters { + name: "softmax_param" + size: 1000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 10 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data_a" +input_layer_names: "data_b" +input_layer_names: "label" +output_layer_names: "__cost_0__" +evaluators { + name: "classification_error_evaluator" + type: "classification_error" + input_layers: "__fc_layer_0__" + input_layers: "label" +} +sub_models { + name: "root" + layer_names: "data_a" + layer_names: "data_b" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__" + layer_names: "__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__" + layer_names: "__last_seq_0__" + layer_names: "__last_seq_1__" + layer_names: "__fc_layer_0__" + layer_names: "label" + layer_names: "__cost_0__" + input_layer_names: "data_a" + input_layer_names: "data_b" + input_layer_names: "label" + output_layer_names: "__cost_0__" + evaluator_names: "classification_error_evaluator" + is_recurrent_layer_group: false +} +sub_models { + name: "__lstm_group_0___recurrent_group" + layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_0__" + link_name: "__mixed_0__@__lstm_group_0___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__lstm_group_1___recurrent_group" + layer_names: "__mixed_1__@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_1__" + link_name: "__mixed_1__@__lstm_group_1___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1__" + has_subseq: false + } + target_inlinkid: -1 +} + diff --git 
a/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr new file mode 100644 index 0000000000000..dacb40185f863 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/simple_rnn_layers.protostr @@ -0,0 +1,418 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "__recurrent_layer_0__" + type: "recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___recurrent_layer_0__.w0" + } + bias_parameter_name: "___recurrent_layer_0__.wbias" + reversed: false +} +layers { + name: "__recurrent_layer_1__" + type: "recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___recurrent_layer_1__.w0" + } + bias_parameter_name: "___recurrent_layer_1__.wbias" + reversed: true +} +layers { + name: "__fc_layer_1__" + type: "fc" + size: 800 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_1__.w0" + } +} +layers { + name: "__lstmemory_0__" + type: "lstmemory" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_1__" + input_parameter_name: "___lstmemory_0__.w0" + } + bias_parameter_name: "___lstmemory_0__.wbias" + reversed: false + active_gate_type: "sigmoid" + active_state_type: "tanh" +} +layers { + name: "__fc_layer_2__" + type: "fc" + size: 800 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_2__.w0" + } +} +layers { + name: "__lstmemory_1__" + type: "lstmemory" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_2__" + input_parameter_name: "___lstmemory_1__.w0" + } + bias_parameter_name: "___lstmemory_1__.wbias" + reversed: true + active_gate_type: "sigmoid" + active_state_type: "tanh" +} +layers { + name: "__fc_layer_3__" + type: "fc" + size: 600 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_3__.w0" + } +} +layers { + name: "__gru_0__" + type: "gated_recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_3__" + input_parameter_name: "___gru_0__.w0" + } + bias_parameter_name: "___gru_0__.wbias" + reversed: false + active_gate_type: "sigmoid" +} +layers { + name: "__fc_layer_4__" + type: "fc" + size: 600 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_4__.w0" + } +} +layers { + name: "__gru_1__" + type: "gated_recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_4__" + input_parameter_name: "___gru_1__.w0" + } + bias_parameter_name: "___gru_1__.wbias" + reversed: true + active_gate_type: "sigmoid" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__recurrent_layer_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__recurrent_layer_1__" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_1__" + type: 
"seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__lstmemory_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_1__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__lstmemory_1__" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_2__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__gru_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_2__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__gru_1__" + } + select_first: true + trans_type: "non-seq" +} +parameters { + name: "___fc_layer_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___recurrent_layer_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___recurrent_layer_0__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___recurrent_layer_1__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___recurrent_layer_1__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_1__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 800 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.wbias" + size: 1400 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1400 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_2__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 800 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_1__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_1__.wbias" + size: 1400 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1400 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_3__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.wbias" + size: 600 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 600 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_4__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: 
"___gru_1__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_1__.wbias" + size: 600 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 600 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__last_seq_0__" +output_layer_names: "__first_seq_0__" +output_layer_names: "__last_seq_1__" +output_layer_names: "__first_seq_1__" +output_layer_names: "__last_seq_2__" +output_layer_names: "__first_seq_2__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__fc_layer_0__" + layer_names: "__recurrent_layer_0__" + layer_names: "__recurrent_layer_1__" + layer_names: "__fc_layer_1__" + layer_names: "__lstmemory_0__" + layer_names: "__fc_layer_2__" + layer_names: "__lstmemory_1__" + layer_names: "__fc_layer_3__" + layer_names: "__gru_0__" + layer_names: "__fc_layer_4__" + layer_names: "__gru_1__" + layer_names: "__last_seq_0__" + layer_names: "__first_seq_0__" + layer_names: "__last_seq_1__" + layer_names: "__first_seq_1__" + layer_names: "__last_seq_2__" + layer_names: "__first_seq_2__" + input_layer_names: "data" + output_layer_names: "__last_seq_0__" + output_layer_names: "__first_seq_0__" + output_layer_names: "__last_seq_1__" + output_layer_names: "__first_seq_1__" + output_layer_names: "__last_seq_2__" + output_layer_names: "__first_seq_2__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr new file mode 100644 index 0000000000000..b110e91498ce7 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bi_grumemory.protostr @@ -0,0 +1,152 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 120 + active_type: "" +} +layers { + name: "__bidirectional_gru_0___fw_transform" + type: "mixed" + size: 120 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___bidirectional_gru_0___fw_transform.w0" + proj_conf { + type: "fc" + name: "___bidirectional_gru_0___fw_transform.w0" + input_size: 120 + output_size: 120 + } + } +} +layers { + name: "__bidirectional_gru_0___fw" + type: "gated_recurrent" + size: 40 + active_type: "tanh" + inputs { + input_layer_name: "__bidirectional_gru_0___fw_transform" + input_parameter_name: "___bidirectional_gru_0___fw.w0" + } + bias_parameter_name: "___bidirectional_gru_0___fw.wbias" + reversed: false + active_gate_type: "sigmoid" +} +layers { + name: "__bidirectional_gru_0___bw_transform" + type: "mixed" + size: 120 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___bidirectional_gru_0___bw_transform.w0" + proj_conf { + type: "fc" + name: "___bidirectional_gru_0___bw_transform.w0" + input_size: 120 + output_size: 120 + } + } +} +layers { + name: "__bidirectional_gru_0___bw" + type: "gated_recurrent" + size: 40 + active_type: "tanh" + inputs { + input_layer_name: "__bidirectional_gru_0___bw_transform" + input_parameter_name: "___bidirectional_gru_0___bw.w0" + } + bias_parameter_name: "___bidirectional_gru_0___bw.wbias" + reversed: true + active_gate_type: "sigmoid" +} +layers { + name: "__bidirectional_gru_0__" + type: "concat" + size: 80 + active_type: "" + inputs { + input_layer_name: "__bidirectional_gru_0___fw" + } + inputs { + input_layer_name: "__bidirectional_gru_0___bw" + } +} +parameters { + name: 
"___bidirectional_gru_0___fw_transform.w0" + size: 14400 + initial_mean: 0.0 + initial_std: 0.0912870929175 + dims: 120 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___bidirectional_gru_0___fw.w0" + size: 4800 + initial_mean: 0.0 + initial_std: 0.158113883008 + dims: 40 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___bidirectional_gru_0___fw.wbias" + size: 120 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 120 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___bidirectional_gru_0___bw_transform.w0" + size: 14400 + initial_mean: 0.0 + initial_std: 0.0912870929175 + dims: 120 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___bidirectional_gru_0___bw.w0" + size: 4800 + initial_mean: 0.0 + initial_std: 0.158113883008 + dims: 40 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___bidirectional_gru_0___bw.wbias" + size: 120 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 120 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__bidirectional_gru_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__bidirectional_gru_0___fw_transform" + layer_names: "__bidirectional_gru_0___fw" + layer_names: "__bidirectional_gru_0___bw_transform" + layer_names: "__bidirectional_gru_0___bw" + layer_names: "__bidirectional_gru_0__" + input_layer_names: "data" + output_layer_names: "__bidirectional_gru_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr new file mode 100644 index 0000000000000..5261cf0c44943 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr @@ -0,0 +1,289 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "labels" + type: "data" + size: 5000 + active_type: "" +} +layers { + name: "probs" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "xe-label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__ctc_layer_0__" + type: "ctc" + size: 5001 + active_type: "" + inputs { + input_layer_name: "input" + } + inputs { + input_layer_name: "labels" + } + norm_by_times: false +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 4 + active_type: "tanh" + inputs { + input_layer_name: "input" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "crf_label" + type: "data" + size: 4 + active_type: "" +} +layers { + name: "__crf_layer_0__" + type: "crf" + size: 4 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___crf_layer_0__.w0" + } + inputs { + input_layer_name: "crf_label" + } + coeff: 1.0 +} +layers { + name: "left" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "right" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__rank_cost_0__" + type: "rank-cost" + size: 1 + active_type: "" + inputs { + input_layer_name: "left" + } + inputs { + input_layer_name: "right" + } + inputs { + input_layer_name: "label" + } + coeff: 1.0 +} +layers { + name: "list_feature" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "list_scores" + 
type: "data" + size: 1 + active_type: "" +} +layers { + name: "__lambda_cost_0__" + type: "lambda_cost" + size: 1 + active_type: "" + inputs { + input_layer_name: "list_feature" + } + inputs { + input_layer_name: "list_scores" + } + NDCG_num: 5 + max_sort_size: -1 +} +layers { + name: "__cross_entropy_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + coeff: 1.0 +} +layers { + name: "__cross_entropy_with_selfnorm_0__" + type: "multi_class_cross_entropy_with_selfnorm" + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + softmax_selfnorm_alpha: 0.1 + coeff: 1.0 +} +layers { + name: "huber_probs" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "huber_label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__huber_cost_0__" + type: "huber" + size: 1 + active_type: "" + inputs { + input_layer_name: "huber_probs" + } + inputs { + input_layer_name: "huber_label" + } + coeff: 1.0 +} +layers { + name: "__multi_binary_label_cross_entropy_0__" + type: "multi_binary_label_cross_entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + coeff: 1.0 +} +parameters { + name: "___fc_layer_0__.w0" + size: 800 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 4 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 4 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___crf_layer_0__.w0" + size: 24 + initial_mean: 0.0 + initial_std: 0.5 + dims: 4 + dims: 6 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "input" +input_layer_names: "labels" +input_layer_names: "crf_label" +input_layer_names: "left" +input_layer_names: "right" +input_layer_names: "label" +input_layer_names: "list_feature" +input_layer_names: "list_scores" +input_layer_names: "probs" +input_layer_names: "xe-label" +input_layer_names: "huber_probs" +input_layer_names: "huber_label" +output_layer_names: "__ctc_layer_0__" +output_layer_names: "__crf_layer_0__" +output_layer_names: "__rank_cost_0__" +output_layer_names: "__lambda_cost_0__" +output_layer_names: "__cross_entropy_0__" +output_layer_names: "__cross_entropy_with_selfnorm_0__" +output_layer_names: "__huber_cost_0__" +output_layer_names: "__multi_binary_label_cross_entropy_0__" +sub_models { + name: "root" + layer_names: "input" + layer_names: "labels" + layer_names: "probs" + layer_names: "xe-label" + layer_names: "__ctc_layer_0__" + layer_names: "__fc_layer_0__" + layer_names: "crf_label" + layer_names: "__crf_layer_0__" + layer_names: "left" + layer_names: "right" + layer_names: "label" + layer_names: "__rank_cost_0__" + layer_names: "list_feature" + layer_names: "list_scores" + layer_names: "__lambda_cost_0__" + layer_names: "__cross_entropy_0__" + layer_names: "__cross_entropy_with_selfnorm_0__" + layer_names: "huber_probs" + layer_names: "huber_label" + layer_names: "__huber_cost_0__" + layer_names: "__multi_binary_label_cross_entropy_0__" + input_layer_names: "input" + input_layer_names: "labels" + input_layer_names: "crf_label" + input_layer_names: "left" + input_layer_names: "right" + input_layer_names: "label" + input_layer_names: "list_feature" + input_layer_names: "list_scores" + input_layer_names: "probs" + input_layer_names: "xe-label" + 
input_layer_names: "huber_probs" + input_layer_names: "huber_label" + output_layer_names: "__ctc_layer_0__" + output_layer_names: "__crf_layer_0__" + output_layer_names: "__rank_cost_0__" + output_layer_names: "__lambda_cost_0__" + output_layer_names: "__cross_entropy_0__" + output_layer_names: "__cross_entropy_with_selfnorm_0__" + output_layer_names: "__huber_cost_0__" + output_layer_names: "__multi_binary_label_cross_entropy_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr new file mode 100644 index 0000000000000..811b38ae4a51e --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers_with_weight.protostr @@ -0,0 +1,111 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "weight" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 10 + active_type: "softmax" + inputs { + input_layer_name: "input" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "__cost_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + inputs { + input_layer_name: "weight" + } + coeff: 1.0 +} +layers { + name: "__regression_cost_0__" + type: "square_error" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + inputs { + input_layer_name: "weight" + } + coeff: 1.0 +} +parameters { + name: "___fc_layer_0__.w0" + size: 3000 + initial_mean: 0.0 + initial_std: 0.057735026919 + dims: 300 + dims: 10 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 10 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 10 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "input" +input_layer_names: "label" +input_layer_names: "weight" +output_layer_names: "__cost_0__" +output_layer_names: "__regression_cost_0__" +evaluators { + name: "classification_error_evaluator" + type: "classification_error" + input_layers: "__fc_layer_0__" + input_layers: "label" + input_layers: "weight" +} +sub_models { + name: "root" + layer_names: "input" + layer_names: "label" + layer_names: "weight" + layer_names: "__fc_layer_0__" + layer_names: "__cost_0__" + layer_names: "__regression_cost_0__" + input_layer_names: "input" + input_layer_names: "label" + input_layer_names: "weight" + output_layer_names: "__cost_0__" + output_layer_names: "__regression_cost_0__" + evaluator_names: "classification_error_evaluator" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_expand_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_expand_layer.protostr new file mode 100644 index 0000000000000..f4b36052264bc --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_expand_layer.protostr @@ -0,0 +1,56 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "data_seq" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "__expand_layer_0__" + type: "expand" + 
size: 30 + active_type: "" + inputs { + input_layer_name: "data" + } + inputs { + input_layer_name: "data_seq" + } + trans_type: "seq" +} +layers { + name: "__expand_layer_1__" + type: "expand" + size: 30 + active_type: "" + inputs { + input_layer_name: "data" + } + inputs { + input_layer_name: "data_seq" + } + trans_type: "non-seq" +} +input_layer_names: "data" +input_layer_names: "data_seq" +output_layer_names: "__expand_layer_0__" +output_layer_names: "__expand_layer_1__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "data_seq" + layer_names: "__expand_layer_0__" + layer_names: "__expand_layer_1__" + input_layer_names: "data" + input_layer_names: "data_seq" + output_layer_names: "__expand_layer_0__" + output_layer_names: "__expand_layer_1__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_fc.protostr new file mode 100644 index 0000000000000..8151898832ded --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_fc.protostr @@ -0,0 +1,98 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__trans_layer_0__" + type: "trans" + size: 100 + active_type: "" + inputs { + input_layer_name: "data" + } +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__trans_layer_0__" + input_parameter_name: "___fc_layer_0__.w0" + } +} +layers { + name: "mask" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__selective_fc_layer_0__" + type: "selective_fc" + size: 100 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___selective_fc_layer_0__.w0" + } + inputs { + input_layer_name: "mask" + } + bias_parameter_name: "___selective_fc_layer_0__.wbias" + selective_fc_pass_generation: false + has_selected_colums: true + selective_fc_full_mul_ratio: 0.02 +} +parameters { + name: "___fc_layer_0__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___selective_fc_layer_0__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true + is_sparse: false +} +parameters { + name: "___selective_fc_layer_0__.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +input_layer_names: "mask" +output_layer_names: "__fc_layer_0__" +output_layer_names: "__selective_fc_layer_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__trans_layer_0__" + layer_names: "__fc_layer_0__" + layer_names: "mask" + layer_names: "__selective_fc_layer_0__" + input_layer_names: "data" + input_layer_names: "mask" + output_layer_names: "__fc_layer_0__" + output_layer_names: "__selective_fc_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_grumemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_grumemory_layer.protostr new file mode 100644 index 0000000000000..2c19b2fd120e7 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_grumemory_layer.protostr @@ -0,0 +1,51 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 120 + active_type: "" +} +layers { + 
name: "__gru_0__" + type: "gated_recurrent" + size: 40 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___gru_0__.w0" + } + bias_parameter_name: "___gru_0__.wbias" + reversed: true + active_gate_type: "tanh" +} +parameters { + name: "___gru_0__.w0" + size: 4800 + initial_mean: 0.0 + initial_std: 0.158113883008 + dims: 40 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.wbias" + size: 120 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 120 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__gru_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__gru_0__" + input_layer_names: "data" + output_layer_names: "__gru_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_hsigmoid.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_hsigmoid.protostr new file mode 100644 index 0000000000000..e81fcb13c4c6e --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_hsigmoid.protostr @@ -0,0 +1,62 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__hsigmoid_0__" + type: "hsigmoid" + size: 1 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___hsigmoid_0__.w0" + } + inputs { + input_layer_name: "label" + } + bias_parameter_name: "___hsigmoid_0__.wbias" + num_classes: 10 +} +parameters { + name: "___hsigmoid_0__.w0" + size: 900 + initial_mean: 0.0 + initial_std: 0.333333333333 + dims: 9 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___hsigmoid_0__.wbias" + size: 9 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 9 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +input_layer_names: "label" +output_layer_names: "__hsigmoid_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "label" + layer_names: "__hsigmoid_0__" + input_layer_names: "data" + input_layer_names: "label" + output_layer_names: "__hsigmoid_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_lstmemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_lstmemory_layer.protostr new file mode 100644 index 0000000000000..76a4afab82c59 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_lstmemory_layer.protostr @@ -0,0 +1,53 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 128 + active_type: "" +} +layers { + name: "__lstmemory_0__" + type: "lstmemory" + size: 32 + active_type: "tanh" + inputs { + input_layer_name: "data" + input_parameter_name: "___lstmemory_0__.w0" + } + bias_parameter_name: "___lstmemory_0__.wbias" + reversed: true + active_gate_type: "tanh" + active_state_type: "tanh" +} +parameters { + name: "___lstmemory_0__.w0" + size: 4096 + initial_mean: 0.0 + initial_std: 0.176776695297 + dims: 32 + dims: 32 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.wbias" + size: 224 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 224 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__lstmemory_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: 
"__lstmemory_0__" + input_layer_names: "data" + output_layer_names: "__lstmemory_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_maxout.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_maxout.protostr new file mode 100644 index 0000000000000..1be2a7ceebfb7 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_maxout.protostr @@ -0,0 +1,209 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 2304 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconv" + size: 36864 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 3 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 1 + output_x: 48 + img_size: 48 + caffe_mode: true + filter_size_y: 3 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 16 + shared_biases: true +} +layers { + name: "__maxout_layer_0__" + type: "maxout" + size: 18432 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + maxout_conf { + channels: 16 + groups: 2 + img_size_x: 0 + img_size_y: 0 + } + } +} +layers { + name: "__pool_0__" + type: "pool" + size: 4608 + active_type: "" + inputs { + input_layer_name: "__maxout_layer_0__" + pool_conf { + pool_type: "max-projection" + channels: 8 + size_x: 2 + stride: 2 + output_x: 24 + img_size: 48 + padding: 0 + size_y: 2 + stride_y: 2 + output_y: 24 + img_size_y: 48 + padding_y: 0 + } + } +} +layers { + name: "__conv_1__" + type: "exconv" + size: 18432 + active_type: "" + inputs { + input_layer_name: "__pool_0__" + input_parameter_name: "___conv_1__.w0" + conv_conf { + filter_size: 3 + channels: 32 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 32 + output_x: 12 + img_size: 12 + caffe_mode: true + filter_size_y: 3 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_1__.wbias" + num_filters: 128 + shared_biases: true +} +layers { + name: "__maxout_layer_1__" + type: "maxout" + size: 9216 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + maxout_conf { + channels: 128 + groups: 4 + img_size_x: 0 + img_size_y: 0 + } + } +} +layers { + name: "__block_expand_layer_0__" + type: "blockexpand" + size: 192 + active_type: "" + inputs { + input_layer_name: "__maxout_layer_0__" + block_expand_conf { + channels: 32 + stride_x: 1 + stride_y: 1 + padding_x: 0 + padding_y: 0 + block_x: 1 + block_y: 6 + output_x: 0 + output_y: 0 + img_size_x: 0 + img_size_y: 0 + } + } +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 384 + active_type: "tanh" + inputs { + input_layer_name: "__block_expand_layer_0__" + input_parameter_name: "___fc_layer_0__.w0" + } +} +parameters { + name: "___conv_0__.w0" + size: 144 + initial_mean: 0.0 + initial_std: 0.471404520791 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 16 + initial_mean: 0.0 + initial_std: 0.0 + dims: 16 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_1__.w0" + size: 36864 + initial_mean: 0.0 + initial_std: 0.0833333333333 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_1__.wbias" + size: 128 + initial_mean: 0.0 + initial_std: 0.0 + dims: 128 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_0__.w0" + size: 73728 + initial_mean: 0.0 + initial_std: 0.0721687836487 + dims: 192 + dims: 384 + 
initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data" +output_layer_names: "__fc_layer_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__conv_0__" + layer_names: "__maxout_layer_0__" + layer_names: "__pool_0__" + layer_names: "__conv_1__" + layer_names: "__maxout_layer_1__" + layer_names: "__block_expand_layer_0__" + layer_names: "__fc_layer_0__" + input_layer_names: "data" + output_layer_names: "__fc_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_ntm_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_ntm_layers.protostr new file mode 100644 index 0000000000000..b30bbb2a4e24d --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_ntm_layers.protostr @@ -0,0 +1,225 @@ +type: "nn" +layers { + name: "w" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "a" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "b" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "c" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "d" + type: "data" + size: 31 + active_type: "" +} +layers { + name: "__interpolation_layer_0__" + type: "interpolation" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__power_layer_0__" + type: "power" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } +} +layers { + name: "__scaling_layer_0__" + type: "scaling" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } +} +layers { + name: "__cos_sim_0__" + type: "cos" + size: 1 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } + cos_scale: 5 +} +layers { + name: "__cos_sim_1__" + type: "cos_vm" + size: 2 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "c" + } + cos_scale: 5 +} +layers { + name: "__sum_to_one_norm_layer_0__" + type: "sum_to_one_norm" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } +} +layers { + name: "__conv_shift_layer_0__" + type: "conv_shift" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "d" + } +} +layers { + name: "__tensor_layer_0__" + type: "tensor" + size: 1000 + active_type: "" + inputs { + input_layer_name: "a" + input_parameter_name: "___tensor_layer_0__.w0" + } + inputs { + input_layer_name: "b" + } + bias_parameter_name: "___tensor_layer_0__.wbias" +} +layers { + name: "__slope_intercept_layer_0__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } + slope: 0.7 + intercept: 0.9 +} +layers { + name: "__linear_comb_layer_0__" + type: "convex_comb" + size: 2 + active_type: "" + inputs { + input_layer_name: "b" + } + inputs { + input_layer_name: "c" + } +} +parameters { + name: "___tensor_layer_0__.w0" + size: 10000000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 100 + dims: 1000 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___tensor_layer_0__.wbias" + size: 1000 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1000 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "w" +input_layer_names: "a" +input_layer_names: "b" +input_layer_names: "c" +input_layer_names: "d" 
+output_layer_names: "__interpolation_layer_0__" +output_layer_names: "__power_layer_0__" +output_layer_names: "__scaling_layer_0__" +output_layer_names: "__cos_sim_0__" +output_layer_names: "__cos_sim_1__" +output_layer_names: "__sum_to_one_norm_layer_0__" +output_layer_names: "__conv_shift_layer_0__" +output_layer_names: "__tensor_layer_0__" +output_layer_names: "__slope_intercept_layer_0__" +output_layer_names: "__linear_comb_layer_0__" +sub_models { + name: "root" + layer_names: "w" + layer_names: "a" + layer_names: "b" + layer_names: "c" + layer_names: "d" + layer_names: "__interpolation_layer_0__" + layer_names: "__power_layer_0__" + layer_names: "__scaling_layer_0__" + layer_names: "__cos_sim_0__" + layer_names: "__cos_sim_1__" + layer_names: "__sum_to_one_norm_layer_0__" + layer_names: "__conv_shift_layer_0__" + layer_names: "__tensor_layer_0__" + layer_names: "__slope_intercept_layer_0__" + layer_names: "__linear_comb_layer_0__" + input_layer_names: "w" + input_layer_names: "a" + input_layer_names: "b" + input_layer_names: "c" + input_layer_names: "d" + output_layer_names: "__interpolation_layer_0__" + output_layer_names: "__power_layer_0__" + output_layer_names: "__scaling_layer_0__" + output_layer_names: "__cos_sim_0__" + output_layer_names: "__cos_sim_1__" + output_layer_names: "__sum_to_one_norm_layer_0__" + output_layer_names: "__conv_shift_layer_0__" + output_layer_names: "__tensor_layer_0__" + output_layer_names: "__slope_intercept_layer_0__" + output_layer_names: "__linear_comb_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_print_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_print_layer.protostr new file mode 100644 index 0000000000000..c402aff174ab7 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_print_layer.protostr @@ -0,0 +1,26 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__print_0__" + type: "print" + active_type: "" + inputs { + input_layer_name: "input" + } +} +input_layer_names: "input" +output_layer_names: "input" +sub_models { + name: "root" + layer_names: "input" + layer_names: "__print_0__" + input_layer_names: "input" + output_layer_names: "input" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr new file mode 100644 index 0000000000000..41d2e2f2671f5 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_rnn_group.protostr @@ -0,0 +1,650 @@ +type: "recurrent_nn" +layers { + name: "seq_input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "sub_seq_input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "seq_input" + input_parameter_name: "___mixed_0__.w0" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 300 + active_type: "" + inputs { + input_layer_name: "seq_input" + input_parameter_name: "___mixed_1__.w0" + proj_conf { + type: "fc" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 300 + } + } +} +layers { + name: "__recurrent_group_0__" 
+ type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "seq_input@__recurrent_group_0__" + type: "scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_forward+delay1@__recurrent_group_0__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_forward@__recurrent_group_0__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "seq_input@__recurrent_group_0__" + input_parameter_name: "_rnn_forward@__recurrent_group_0__.w0" + } + inputs { + input_layer_name: "rnn_forward+delay1@__recurrent_group_0__" + input_parameter_name: "_rnn_forward@__recurrent_group_0__.w1" + } + bias_parameter_name: "_rnn_forward@__recurrent_group_0__.wbias" +} +layers { + name: "rnn_forward" + type: "gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_forward" + } + trans_type: "non-seq" +} +layers { + name: "__recurrent_group_1__" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "seq_input@__recurrent_group_1__" + type: "scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_back+delay1@__recurrent_group_1__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_back@__recurrent_group_1__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "seq_input@__recurrent_group_1__" + input_parameter_name: "_rnn_back@__recurrent_group_1__.w0" + } + inputs { + input_layer_name: "rnn_back+delay1@__recurrent_group_1__" + input_parameter_name: "_rnn_back@__recurrent_group_1__.w1" + } + bias_parameter_name: "_rnn_back@__recurrent_group_1__.wbias" +} +layers { + name: "rnn_back" + type: "gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_back" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__recurrent_group_2__" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "sub_seq_input@__recurrent_group_2__" + type: "sequence_scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_subseq_forward@__recurrent_group_2__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "sub_seq_input@__recurrent_group_2__" + input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w0" + } + inputs { + input_layer_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w1" + } + bias_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" +} +layers { + name: "rnn_subseq_forward" + type: "sequence_gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_subseq_forward" + } + trans_type: "non-seq" +} +layers { + name: "__lstm_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_0__@__lstm_group_0___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + 
active_type: "" +} +layers { + name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_0___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + input_parameter_name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" + proj_conf { + type: "fc" + name: "___lstm_group_0___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + } + bias_parameter_name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" + active_gate_type: "sigmoid" + active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_2__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_0__" + } + trans_type: "non-seq" +} +layers { + name: "__gru_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_1__@__gru_group_0___recurrent_group" + type: "scatter_agent" + size: 300 + active_type: "" +} +layers { + name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__gru_group_0__@__gru_group_0___recurrent_group" + type: "gru_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group" + input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" + } + inputs { + input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + } + bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" + active_gate_type: "sigmoid" +} +layers { + name: "__gru_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_3__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__gru_group_0__" + } + trans_type: "non-seq" +} +parameters { + name: "___mixed_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_1__.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 300 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.wbias" + size: 200 + initial_mean: 
0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_rnn_back@__recurrent_group_1__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_back@__recurrent_group_1__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_back@__recurrent_group_1__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106781187 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.1 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.01 + dims: 100 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "seq_input" +input_layer_names: "sub_seq_input" +output_layer_names: "__last_seq_0__" +output_layer_names: "__first_seq_0__" +output_layer_names: "__last_seq_1__" +output_layer_names: "__last_seq_2__" +output_layer_names: "__last_seq_3__" +sub_models { + name: "root" + layer_names: "seq_input" + layer_names: "sub_seq_input" + layer_names: "label" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__recurrent_group_0__" + layer_names: "rnn_forward" + layer_names: "__last_seq_0__" + layer_names: "__recurrent_group_1__" + layer_names: "rnn_back" + layer_names: "__first_seq_0__" + layer_names: "__recurrent_group_2__" + layer_names: "rnn_subseq_forward" + layer_names: "__last_seq_1__" + layer_names: "__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__" + layer_names: "__last_seq_2__" + layer_names: "__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__" + layer_names: "__last_seq_3__" + input_layer_names: "seq_input" + input_layer_names: "sub_seq_input" + output_layer_names: "__last_seq_0__" + output_layer_names: "__first_seq_0__" + output_layer_names: "__last_seq_1__" + output_layer_names: "__last_seq_2__" + output_layer_names: "__last_seq_3__" + is_recurrent_layer_group: false +} +sub_models { + name: "__recurrent_group_0__" + layer_names: "seq_input@__recurrent_group_0__" + layer_names: "rnn_forward+delay1@__recurrent_group_0__" + layer_names: "rnn_forward@__recurrent_group_0__" + 
is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "rnn_forward@__recurrent_group_0__" + link_name: "rnn_forward+delay1@__recurrent_group_0__" + is_sequence: false + } + in_links { + layer_name: "seq_input" + link_name: "seq_input@__recurrent_group_0__" + has_subseq: false + } + out_links { + layer_name: "rnn_forward@__recurrent_group_0__" + link_name: "rnn_forward" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__recurrent_group_1__" + layer_names: "seq_input@__recurrent_group_1__" + layer_names: "rnn_back+delay1@__recurrent_group_1__" + layer_names: "rnn_back@__recurrent_group_1__" + is_recurrent_layer_group: true + reversed: true + memories { + layer_name: "rnn_back@__recurrent_group_1__" + link_name: "rnn_back+delay1@__recurrent_group_1__" + is_sequence: false + } + in_links { + layer_name: "seq_input" + link_name: "seq_input@__recurrent_group_1__" + has_subseq: false + } + out_links { + layer_name: "rnn_back@__recurrent_group_1__" + link_name: "rnn_back" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__recurrent_group_2__" + layer_names: "sub_seq_input@__recurrent_group_2__" + layer_names: "rnn_subseq_forward+delay1@__recurrent_group_2__" + layer_names: "rnn_subseq_forward@__recurrent_group_2__" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "rnn_subseq_forward@__recurrent_group_2__" + link_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + is_sequence: false + } + in_links { + layer_name: "sub_seq_input" + link_name: "sub_seq_input@__recurrent_group_2__" + has_subseq: true + } + out_links { + layer_name: "rnn_subseq_forward@__recurrent_group_2__" + link_name: "rnn_subseq_forward" + has_subseq: true + } + target_inlinkid: -1 +} +sub_models { + name: "__lstm_group_0___recurrent_group" + layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_0__" + link_name: "__mixed_0__@__lstm_group_0___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__gru_group_0___recurrent_group" + layer_names: "__mixed_1__@__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__@__gru_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" + link_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_1__" + link_name: "__mixed_1__@__gru_group_0___recurrent_group" + 
has_subseq: false + } + out_links { + layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" + link_name: "__gru_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr new file mode 100644 index 0000000000000..1999c006d237e --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_sequence_pooling.protostr @@ -0,0 +1,111 @@ +type: "nn" +layers { + name: "dat_in" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__seq_pooling_0__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + trans_type: "seq" +} +layers { + name: "__seq_pooling_1__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_2__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "average" + trans_type: "seq" +} +layers { + name: "__seq_pooling_3__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "average" + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_4__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "sum" + trans_type: "seq" +} +layers { + name: "__seq_pooling_5__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "sum" + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_6__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + output_max_index: true + trans_type: "non-seq" +} +input_layer_names: "dat_in" +output_layer_names: "__seq_pooling_0__" +output_layer_names: "__seq_pooling_1__" +output_layer_names: "__seq_pooling_2__" +output_layer_names: "__seq_pooling_3__" +output_layer_names: "__seq_pooling_4__" +output_layer_names: "__seq_pooling_5__" +output_layer_names: "__seq_pooling_6__" +sub_models { + name: "root" + layer_names: "dat_in" + layer_names: "__seq_pooling_0__" + layer_names: "__seq_pooling_1__" + layer_names: "__seq_pooling_2__" + layer_names: "__seq_pooling_3__" + layer_names: "__seq_pooling_4__" + layer_names: "__seq_pooling_5__" + layer_names: "__seq_pooling_6__" + input_layer_names: "dat_in" + output_layer_names: "__seq_pooling_0__" + output_layer_names: "__seq_pooling_1__" + output_layer_names: "__seq_pooling_2__" + output_layer_names: "__seq_pooling_3__" + output_layer_names: "__seq_pooling_4__" + output_layer_names: "__seq_pooling_5__" + output_layer_names: "__seq_pooling_6__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/unused_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/unused_layers.protostr new file mode 100644 index 0000000000000..89ed28406e553 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/unused_layers.protostr @@ -0,0 +1,27 @@ +type: "nn" +layers { + name: "probs" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__sampling_id_layer_0__" + type: "sampling_id" + size: 100 + active_type: "" + inputs { + input_layer_name: "probs" + } +} +input_layer_names: "probs" +output_layer_names: "__sampling_id_layer_0__" +sub_models 
{ + name: "root" + layer_names: "probs" + layer_names: "__sampling_id_layer_0__" + input_layer_names: "probs" + output_layer_names: "__sampling_id_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr new file mode 100644 index 0000000000000..d0ad388165007 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/util_layers.protostr @@ -0,0 +1,81 @@ +type: "nn" +layers { + name: "a" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "b" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__addto_0__" + type: "addto" + size: 10 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__concat_0__" + type: "concat" + size: 20 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__concat_1__" + type: "concat2" + size: 20 + active_type: "" + inputs { + input_layer_name: "a" + proj_conf { + type: "identity" + name: "___concat_1__.w0" + input_size: 10 + output_size: 10 + } + } + inputs { + input_layer_name: "b" + proj_conf { + type: "identity" + name: "___concat_1__.w1" + input_size: 10 + output_size: 10 + } + } +} +input_layer_names: "a" +input_layer_names: "b" +output_layer_names: "__addto_0__" +output_layer_names: "__concat_0__" +output_layer_names: "__concat_1__" +sub_models { + name: "root" + layer_names: "a" + layer_names: "b" + layer_names: "__addto_0__" + layer_names: "__concat_0__" + layer_names: "__concat_1__" + input_layer_names: "a" + input_layer_names: "b" + output_layer_names: "__addto_0__" + output_layer_names: "__concat_0__" + output_layer_names: "__concat_1__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh index 78114ce32b019..f05fc46cd5520 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh @@ -1,5 +1,17 @@ #!/bin/bash cd `dirname $0` + set -e + +protostr=`dirname $0`/protostr + +files=`ls $protostr | grep -v "unitest"` + ./generate_protostr.sh -md5sum -c check.md5 + +for file in $files +do + base_protostr=$protostr/$file + new_protostr=$protostr/$file.unitest + diff $base_protostr $new_protostr +done From e05f4ff26700dd34aa6d3c6da7061c62c5fa39c9 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Sun, 6 Nov 2016 23:04:02 -0600 Subject: [PATCH 082/180] Fix SRL hang when exit. (#291) * Fix SRL hang when exit. * Error occurred when enable Async Load in TestDataProvider. * It because DataProvider is calling getNextBatchInternal in one thread, and destructing DataProvider in other thread. * Add wait routine in DataProvider destructing. * Also fix another bug, when destructing TestDataProvider and do not read any test data. Fix #286 * Follow comments, Use mutex is cool! 
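* For reference, a minimal sketch of the locking pattern described above, using hypothetical names (Provider, resetMutex_, exiting_) rather than the real PyDataProvider2 members: reset/destruction and getNextBatchInternal take the same mutex, and the destructor raises an exit flag and joins the loading thread before any member is torn down.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class Provider {
    public:
      Provider() : exiting_(false), loader_([this] { loadLoop(); }) {}

      ~Provider() {
        exiting_ = true;    // ask the loading thread to stop
        cv_.notify_all();   // wake it up if it is sleeping
        loader_.join();     // wait routine: never destruct while a load is still running
      }

      // Guarded by the same mutex as reset/destruction, so the two cannot interleave.
      int getNextBatch() {
        std::lock_guard<std::mutex> guard(resetMutex_);
        if (exiting_) return 0;  // provider is being torn down, return an empty batch
        return batch_;
      }

    private:
      void loadLoop() {
        while (!exiting_) {
          std::unique_lock<std::mutex> lock(resetMutex_);
          ++batch_;  // pretend to load one batch
          cv_.wait_for(lock, std::chrono::milliseconds(5));
        }
      }

      std::atomic<bool> exiting_;
      std::mutex resetMutex_;
      std::condition_variable cv_;
      int batch_ = 0;
      std::thread loader_;  // declared last so every other member is ready before it starts
    };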
--- demo/semantic_role_labeling/.gitignore | 10 ++++++++++ paddle/gserver/dataproviders/DataProvider.cpp | 3 ++- .../gserver/dataproviders/PyDataProvider2.cpp | 19 +++++++++++++++++-- paddle/gserver/tests/test_PyDataProvider2.cpp | 17 +++++++++++++++++ 4 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 demo/semantic_role_labeling/.gitignore diff --git a/demo/semantic_role_labeling/.gitignore b/demo/semantic_role_labeling/.gitignore new file mode 100644 index 0000000000000..cd90ca7bbe9be --- /dev/null +++ b/demo/semantic_role_labeling/.gitignore @@ -0,0 +1,10 @@ +*.pyc +train.log +data/feature +data/conll05st-release/ +data/src.dict +data/test.wsj.props +data/test.wsj.seq_pair +data/test.wsj.words +data/tgt.dict +output diff --git a/paddle/gserver/dataproviders/DataProvider.cpp b/paddle/gserver/dataproviders/DataProvider.cpp index 8cefbb30ada46..2cfb5a3a18c8a 100644 --- a/paddle/gserver/dataproviders/DataProvider.cpp +++ b/paddle/gserver/dataproviders/DataProvider.cpp @@ -131,9 +131,10 @@ void DoubleBuffer::asyncLoadBatch() { taskReadySem_.wait(); if (stopping_) break; - while (batchSize_ == 0) { + while (batchSize_ == 0 && !stopping_) { usleep(5); } + if (stopping_) break; do { DataBatch newBatch; diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/gserver/dataproviders/PyDataProvider2.cpp index ca8b07af49ca0..90391a7c307d8 100644 --- a/paddle/gserver/dataproviders/PyDataProvider2.cpp +++ b/paddle/gserver/dataproviders/PyDataProvider2.cpp @@ -433,26 +433,34 @@ class PyDataProvider2 : public DataProvider { inline void resetImpl(bool startNewThread) { DBG << "Reseting " << startNewThread; + exit_.store(true); if (loadThread_) { // is loading. - exit_.store(true); loadThread_->join(); loadThread_.reset(); } { PyGuard g; callingContexts_.clear(); + this->pullCV_.notify_one(); + } + + std::lock_guard guard(mutexForReset_); + { + PyGuard g; dataPool_.clear(); } poolActualSize_ = 0; - exit_ = false; + if (startNewThread && cache_->reset()) { DBG << "Start new thread."; loadThread_.reset(new std::thread([this] { + exit_ = false; loadThread(); })); callingContextCreated_.wait(); } DBG << "Reset done"; + exit_ = false; } private: @@ -465,6 +473,8 @@ class PyDataProvider2 : public DataProvider { std::condition_variable pullCV_; std::mutex mtx_; + std::mutex mutexForReset_; + ThreadBarrier callingContextCreated_; std::unique_ptr cache_; @@ -529,6 +539,7 @@ class PyDataProvider2 : public DataProvider { * Loading a batch of data. */ int64_t getNextBatchInternal(int64_t size_, DataBatch *batch) { + std::lock_guard guard(mutexForReset_); REGISTER_TIMER("PyDP2.getNextBatchInternal") CHECK_GE(size_, 0); size_t size = (size_t) size_; @@ -554,6 +565,10 @@ class PyDataProvider2 : public DataProvider { } else { // loading from cache. poolPtr = this->cache_->load(); } + if (exit_) { + // PyDataProvider is destructing. 
+ return 0; + } CHECK(poolPtr != nullptr); std::deque& pool = *poolPtr; diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/gserver/tests/test_PyDataProvider2.cpp index 6bf1e32925121..b9867a728d9b4 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/gserver/tests/test_PyDataProvider2.cpp @@ -353,6 +353,23 @@ TEST(PyDataProvider2, test_check) { } } +TEST(PyDataProvider2, multiThread) { + paddle::DataConfig config; + config.set_type("py2"); + config.set_files(FLAGS_train_list.c_str()); + config.set_load_data_module("test_PyDataProvider2"); + config.set_load_data_object("test_dense_no_seq"); + config.set_async_load_data(true); + + std::unique_ptr provider( + paddle::DataProvider::create(config, false)); + provider->reset(); + paddle::DataBatch batch; + provider->getNextBatch(100, &batch); + provider->reset(); + provider.reset(); +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); paddle::initMain(argc, argv); From 57348806b5a9f83536113578adc6746fe6b19421 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 14:27:06 +0800 Subject: [PATCH 083/180] Follow comments --- cmake/flags.cmake | 6 +- doc/ui/api/trainer_config_helpers/layers.rst | 2 +- paddle/cuda/include/hl_cnn.h | 12 +- paddle/cuda/include/stub/hl_cnn_stub.h | 8 +- paddle/cuda/src/hl_cuda_cnn.cu | 118 +++++++++--------- paddle/gserver/layers/BilinearInterpLayer.cpp | 19 ++- paddle/gserver/layers/BilinearInterpLayer.h | 2 +- paddle/gserver/tests/test_LayerGrad.cpp | 2 - paddle/math/Matrix.cpp | 24 +++- proto/ModelConfig.proto.m4 | 9 +- python/paddle/trainer/config_parser.py | 13 +- .../paddle/trainer_config_helpers/layers.py | 28 +---- .../tests/configs/test_bilinear_interp.py | 5 +- 13 files changed, 121 insertions(+), 127 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index dbad6be3f41b3..8c5cb4cc49d93 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -57,9 +57,9 @@ endif() set(COMMON_FLAGS -fPIC -fno-omit-frame-pointer - -Wall - -Wextra - -Werror +# -Wall +# -Wextra +# -Werror -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wno-unused-parameter diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index ab27c3bd6e8ad..c78682423e448 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -276,7 +276,7 @@ interpolation_layer :noindex: bilinear_interp_layer -------------------- +---------------------- .. automodule:: paddle.trainer_config_helpers.layers :members: bilinear_interp_layer :noindex: diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index b5240da0f398c..ac35727ac28c7 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -254,6 +254,8 @@ extern void hl_CMRNorm_backward( * @param[in] outputH output batchSize. * @param[in] outputW output image data dim. * @param[in] numChannels number of channels. + * @param[in] ratioH inImgH / outImgH. + * @param[in] ratioW inImgW / outImgW. * */ extern void hl_bilinear_forward(const real* inData, @@ -266,7 +268,9 @@ extern void hl_bilinear_forward(const real* inData, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); /** * @brief Bilinear interpolation backward. @@ -282,6 +286,8 @@ extern void hl_bilinear_forward(const real* inData, * @param[in] outputH output batchSize. * @param[in] outputW output image data dim. 
* @param[in] numChannels number of channels. + * @param[in] ratioH inImgH / outImgH. + * @param[in] ratioW inImgW / outImgW. * */ extern void hl_bilinear_backward(real* inGrad, @@ -294,7 +300,9 @@ extern void hl_bilinear_backward(real* inGrad, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); /** * @brief MaxOut forward. diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/cuda/include/stub/hl_cnn_stub.h index cf79fad9004cd..50fddce584252 100644 --- a/paddle/cuda/include/stub/hl_cnn_stub.h +++ b/paddle/cuda/include/stub/hl_cnn_stub.h @@ -99,7 +99,9 @@ inline void hl_bilinear_forward(const real* inData, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels) {} + const size_t numChannels, + const real ratioH, + const real ratioW) {} inline void hl_bilinear_backward(real* inGrad, const size_t inImgH, @@ -111,7 +113,9 @@ inline void hl_bilinear_backward(real* inGrad, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels) {} + const size_t numChannels, + const real ratioH, + const real ratioW) {} inline void hl_maxout_forward( const real* inData, real* outData, int* idData, diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index 499b61195af5e..49c09334e086d 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -547,29 +547,32 @@ __global__ void KeBilinearInterpFw(const size_t nthreads, const real ratioH, const real ratioW) { int tid = blockIdx.x * blockDim.x + threadIdx.x; - if(tid < nthreads) { - int outIdH = tid / (outputW / numChannels); - int outIdW = tid % (outputW / numChannels); - - int inIdH = ratioH * (outIdW / outImgW); - int hId = (inIdH < inImgH - 1) ? 1 : 0; - real hlambda = ratioH * (outIdW / outImgW) - inIdH; - - int inIdW = ratioW * (tid % outImgW); - int wId = (inIdW < inImgW - 1) ? 1 : 0; - real wlambda = ratioW * (tid % outImgW) - inIdW; - - const real* inPos = &in[outIdH * inputW + inIdH * inImgW + inIdW]; - real* outPos = &out[outIdH * outputW + outIdW]; - for (int c = 0; c < numChannels; ++c) { - // bilinear interpolation - outPos[0] = (1.f - hlambda) * - ((1.f - wlambda) * inPos[0] + wlambda * inPos[wId]) + - hlambda * ((1.f - wlambda) * inPos[hId * inImgW] + - wlambda * inPos[hId * inImgW + wId]); - inPos += inImgH * inImgW; - outPos += outImgH * outImgW; - } + if (tid < nthreads) { + int outIdH = tid / outputW; + int outIdW = tid % outputW; + int inImgSize = inputW / numChannels; + int outImgSize = outputW / numChannels; + int channelId = outIdW / outImgSize; + + int outImgIdy = (outIdW % outImgSize) / outImgW; + int inImgIdy = ratioH * outImgIdy; + int hId = (inImgIdy < inImgH - 1) ? 1 : 0; + real h1lambda = ratioH * outImgIdy - inImgIdy; + real h2lambda = 1.f - h1lambda; + + int outImgIdx = tid % outImgW; + int inImgIdx = ratioW * outImgIdx; + int wId = (inImgIdx < inImgW - 1) ? 
1 : 0; + real w1lambda = ratioW * outImgIdx - inImgIdx; + real w2lambda = 1.f - w1lambda; + + const real* inPos = + &in[outIdH * inputW + channelId * inImgSize + inImgIdy * inImgW + inImgIdx]; + + // bilinear interpolation + out[outIdH * outputW + outIdW] = + h2lambda * (w2lambda * inPos[0] + w1lambda * inPos[wId]) + + h1lambda * (w2lambda * inPos[hId * inImgW] + w1lambda * inPos[hId * inImgW + wId]); } } @@ -583,15 +586,12 @@ void hl_bilinear_forward(const real* inData, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels) { - int threadNum = outputH * outImgH * outImgW; + const size_t numChannels, + const real ratioH, + const real ratioW) { + int threadNum = outputH * outputW; int blocks = (threadNum + 1024 - 1) / 1024; - real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - real ratioW = (outImgW > 1) ? - static_cast(inImgW - 1) / (outImgW - 1) : 0.f; - KeBilinearInterpFw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( threadNum, inData, inImgH, inImgW, inputH, inputW, outData, outImgH, outImgW, outputH, outputW, numChannels, ratioH, ratioW); @@ -613,29 +613,32 @@ __global__ void KeBilinearInterpBw(const size_t nthreads, const real ratioH, const real ratioW) { int tid = blockIdx.x * blockDim.x + threadIdx.x; - - if(tid < nthreads) { - int outIdH = tid / (outputW / numChannels); - int outIdW = tid % (outputW / numChannels); - - int inIdH = ratioH * (outIdW / outImgW); - int hId = (inIdH < inImgH - 1) ? 1 : 0; - real hlambda = ratioH * (outIdW / outImgW) - inIdH; - - int inIdW = ratioW * (tid % outImgW); - int wId = (inIdW < inImgW - 1) ? 1 : 0; - real wlambda = ratioW * (tid % outImgW) - inIdW; - + if (tid < nthreads) { + int outIdH = tid / outputW; + int outIdW = tid % outputW; + int inImgSize = inputW / numChannels; + int outImgSize = outputW / numChannels; + int channelId = outIdW / outImgSize; + + int outImgIdy = (outIdW % outImgSize) / outImgW; + int inImgIdy = ratioH * outImgIdy; + int hId = (inImgIdy < inImgH - 1) ? 1 : 0; + real h1lambda = ratioH * outImgIdy - inImgIdy; + real h2lambda = 1.f - h1lambda; + + int outImgIdx = tid % outImgW; + int inImgIdx = ratioW * outImgIdx; + int wId = (inImgIdx < inImgW - 1) ? 
1 : 0; + real w1lambda = ratioW * outImgIdx - inImgIdx; + real w2lambda = 1.f - w1lambda; + + real* inPos = + &in[outIdH * inputW + channelId * inImgSize + inImgIdy * inImgW + inImgIdx]; const real* outPos = &out[outIdH * outputW + outIdW]; - real* inPos = &in[outIdH * inputW + inIdH * inImgW + inIdW]; - for (int c = 0; c < numChannels; ++c) { - atomicAdd(&inPos[0], (1.f - hlambda) * (1.f - wlambda) * outPos[0]); - atomicAdd(&inPos[wId], (1.f - hlambda) * wlambda * outPos[0]); - atomicAdd(&inPos[hId * inImgW], hlambda * (1.f - wlambda) * outPos[0]); - atomicAdd(&inPos[hId * inImgW + wId], hlambda * wlambda * outPos[0]); - inPos += inImgH * inImgW; - outPos += outImgH * outImgW; - } + atomicAdd(&inPos[0], h2lambda * w2lambda * outPos[0]); + atomicAdd(&inPos[wId], h2lambda * w1lambda * outPos[0]); + atomicAdd(&inPos[hId * inImgW], h1lambda * w2lambda * outPos[0]); + atomicAdd(&inPos[hId * inImgW + wId], h1lambda * w1lambda * outPos[0]); } } @@ -649,14 +652,11 @@ void hl_bilinear_backward(real* inGrad, const size_t outImgW, const size_t outputH, const size_t outputW, - const size_t numChannels) { - int threadNum = outputH * outImgH * outImgW; + const size_t numChannels, + const real ratioH, + const real ratioW) { + int threadNum = outputH * outputW; int blocks = (threadNum + 1024 - 1) / 1024; - - real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - real ratioW = (outImgW > 1) ? - static_cast(inImgW - 1) / (outImgW - 1) : 0.f; KeBilinearInterpBw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( threadNum, inGrad, inImgH, inImgW, inputH, inputW, outGrad, diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/gserver/layers/BilinearInterpLayer.cpp index f43086e585535..9f0c01a838562 100644 --- a/paddle/gserver/layers/BilinearInterpLayer.cpp +++ b/paddle/gserver/layers/BilinearInterpLayer.cpp @@ -20,7 +20,11 @@ namespace paddle { REGISTER_LAYER(bilinear_interp, BilinearInterpLayer); -size_t BilinearInterpLayer::getDataDimSize() { +size_t BilinearInterpLayer::getSize() { + inImgH_ = inputLayers_[0]->getOutput().getFrameHeight(); + inImgW_ = inputLayers_[0]->getOutput().getFrameWidth(); + CHECK(inImgH_ > 0 && inImgW_ > 0); + getOutput().setFrameHeight(outImgH_); getOutput().setFrameWidth(outImgW_); return outImgH_ * outImgW_ * numChannels_; @@ -34,20 +38,12 @@ bool BilinearInterpLayer::init(const LayerMap& layerMap, CHECK_EQ(1, config_.inputs_size()); const BilinearInterpConfig& conf = config_.inputs(0).bilinear_interp_conf(); - inImgH_ = inputLayers_[0]->getOutput().getFrameHeight(); - inImgW_ = inputLayers_[0]->getOutput().getFrameWidth(); - if (inImgH_ == 0) { - inImgH_ = conf.img_size_y(); - } - if (inImgW_ == 0) { - inImgW_ = conf.img_size_x(); - } + outImgH_ = conf.out_size_y(); outImgW_ = conf.out_size_x(); numChannels_ = conf.num_channels(); CHECK(outImgH_ > 0 && outImgW_ > 0); - CHECK(inImgH_ > 0 && inImgW_ > 0); CHECK(numChannels_); return true; @@ -55,8 +51,9 @@ bool BilinearInterpLayer::init(const LayerMap& layerMap, void BilinearInterpLayer::forward(PassType passType) { Layer::forward(passType); + size_t batchSize = getInput(0).getBatchSize(); - size_t size = getDataDimSize(); + size_t size = getSize(); { REGISTER_TIMER_INFO("FwResetTimer", getName().c_str()); resetOutput(batchSize, size); diff --git a/paddle/gserver/layers/BilinearInterpLayer.h b/paddle/gserver/layers/BilinearInterpLayer.h index 24f5b99910405..33e0cb1220511 100644 --- a/paddle/gserver/layers/BilinearInterpLayer.h +++ b/paddle/gserver/layers/BilinearInterpLayer.h @@ -36,7 +36,7 @@ class 
BilinearInterpLayer : public Layer { virtual ~BilinearInterpLayer() {} - size_t getDataDimSize(); + size_t getSize(); bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); void forward(PassType passType); void backward(const UpdateCallback& callback = nullptr); diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index db48cc47a4a63..54a9aea024423 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -40,8 +40,6 @@ TEST(Layer, BilinearInterpLayer) { LayerInputConfig* input = config.layerConfig.add_inputs(); BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf(); - bilinear->set_img_size_x(32); - bilinear->set_img_size_y(32); bilinear->set_out_size_x(64); bilinear->set_out_size_y(64); bilinear->set_num_channels(4); diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index ce4d2ac39938f..33bc8d280fe8c 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1197,12 +1197,18 @@ void GpuMatrix::bilinearForward(const Matrix& in, real* outData = getData(); const real* inData = in.getData(); + real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + real ratioW = (outImgW > 1) ? + static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + if (inImgH == outImgW && inImgW == outImgW) { this->copyFrom(in); } else { - hl_bilinear_forward(inData, inImgH, inImgW, - inputH, inputW, outData, outImgH, outImgW, - outputH, outputW, numChannels); + hl_bilinear_forward( + inData, inImgH, inImgW, inputH, inputW, outData, + outImgH, outImgW, outputH, outputW, numChannels, + ratioH, ratioW); } } @@ -1222,12 +1228,18 @@ void GpuMatrix::bilinearBackward(const Matrix& out, real* inGrad = getData(); const real* outGrad = out.getData(); + real ratioH = (outImgH > 1) ? + static_cast(inImgH - 1) / (outImgH - 1) : 0.f; + real ratioW = (outImgW > 1) ? + static_cast(inImgW - 1) / (outImgW - 1) : 0.f; + if (outImgH == inImgH && outImgW == inImgW) { this->copyFrom(out); } else { - hl_bilinear_backward(inGrad, inImgH, inImgW, - inputH, inputW, outGrad, outImgH, outImgW, - outputH, outputW, numChannels); + hl_bilinear_backward( + inGrad, inImgH, inImgW, inputH, inputW, outGrad, + outImgH, outImgW, outputH, outputW, numChannels, + ratioH, ratioW); } } diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 753fd0cac4223..a1eb11eccaeda 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -213,13 +213,10 @@ message OperatorConfig { } message BilinearInterpConfig { - // The size if input feature map. - required uint32 img_size_x = 1; - required uint32 img_size_y = 2; // The size if output feature map. 
- required uint32 out_size_x = 3; - required uint32 out_size_y = 4; - required uint32 num_channels = 5; + required uint32 out_size_x = 1; + required uint32 out_size_y = 2; + required uint32 num_channels = 3; } message ImageConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index c6cd4f62b91c9..574c02eefc35f 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -734,8 +734,6 @@ def __init__( class BilinearInterp(Cfg): def __init__( self, - img_size_x = None, - img_size_y=None, out_size_x = None, out_size_y = None, num_channels = None): @@ -982,8 +980,6 @@ def TestData(data_config, async_load_data=None): g_config.test_data_config.async_load_data = async_load_data def parse_bilinear(bilinear, input_layer_name, bilinear_conf): - bilinear_conf.img_size_x = bilinear.img_size_x; - bilinear_conf.img_size_y = bilinear.img_size_y; bilinear_conf.out_size_x = bilinear.out_size_x; bilinear_conf.out_size_y = bilinear.out_size_y; bilinear_conf.num_channels = bilinear.num_channels; @@ -2367,15 +2363,16 @@ def __init__( self, name, inputs, - device=None): + **xargs): super(BilinearInterpLayer, self).__init__( - name, 'bilinear_interp', 0, inputs=inputs, device=device) + name, 'bilinear_interp', 0, inputs=inputs, **xargs) input_layer = self.get_input_layer(0) - self.set_layer_size(input_layer.size) parse_bilinear(self.inputs[0].bilinear_interp, input_layer.name, self.config.inputs[0].bilinear_interp_conf); - + conf = self.inputs[0].bilinear_interp + self.set_layer_size(conf.out_size_x * conf.out_size_y * conf.num_channels) + @config_layer('sum_to_one_norm') class SumToOneNormLayer(LayerBase): def __init__( diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 8d249b140e8cd..6457c60a35392 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1259,11 +1259,8 @@ def interpolation_layer(input, weight, name=None, layer_attr=None): @wrap_name_default() @layer_support() def bilinear_interp_layer(input, - img_size_x=None, - img_size_y=None, out_size_x=None, out_size_y=None, - num_channels=None, name=None, layer_attr=None): """ @@ -1276,25 +1273,15 @@ def bilinear_interp_layer(input, .. code-block:: python bilinear = bilinear_interp_layer(input, - img_size_x, - img_size_y, out_size_x, - out_size_y, - num_channels) + out_size_y) :para input: A input layer. :type input: LayerOutput. - :para img_size_x: previous layer output width. - :type img_size_x: int|None - :para img_size_y: previous layer output height. - :type img_size_y: int|None :para out_size_x: bilinear interpolation output width. :type out_size_x: int|None :para out_size_y: bilinear interpolation output height. :type out_size_y: int|None - :para num_channels: number of channels of input layer. If None, - it will be set automatically from previous output. - :type num_channels: int|None :para name: The layer's name, which cna not be specified. :type name: None|basestring :para layer_attr: Extra Layer attribute. 
@@ -1304,21 +1291,18 @@ def bilinear_interp_layer(input, """ assert input.layer_type == LayerType.CONV_LAYER assert isinstance(input.activation, LinearActivation) - assert img_size_x > 0 and img_size_y > 0 assert out_size_x > 0 and out_size_y > 0 - if num_channels is None: - assert input.numfilters is not None - num_channels = input.num_filters + assert input.numfilters is not None + num_channels = input.num_filters Layer(name=name, inputs=Input(input.name, - bilinear_interp=BilinearInterp(img_size_x=img_size_x, - img_size_y=img_size_y, - out_size_x=out_size_x, + bilinear_interp=BilinearInterp(out_size_x=out_size_x, out_size_y=out_size_y, num_channels=num_channels)), type=LayerType.BILINEAR_INTERP_LAYER, **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input]) + return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input], + num_filters=num_filters) @wrap_name_default() @layer_support() diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py index 7815b34abcc25..5a61c5a395af2 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.py @@ -16,11 +16,8 @@ bias_attr=True) bilinear = bilinear_interp_layer(input=conv, - img_size_x=32, - img_size_y=32, out_size_x=64, - out_size_y=64, - num_channels=16) + out_size_y=64) pool = img_pool_layer(input=bilinear, num_channels=4, From 45b8c47e043e6163dc3e228bf2fb7a6f6cab9b43 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 15:10:15 +0800 Subject: [PATCH 084/180] Add img_size for unit test --- cmake/flags.cmake | 6 ++--- paddle/gserver/layers/BilinearInterpLayer.cpp | 24 ++++++++++++------- paddle/gserver/tests/test_LayerGrad.cpp | 2 ++ proto/ModelConfig.proto.m4 | 11 +++++---- 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 337db53b37fa3..e087770991aef 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -65,9 +65,9 @@ endif() set(COMMON_FLAGS -fPIC -fno-omit-frame-pointer -# -Wall -# -Wextra -# -Werror + -Wall + -Wextra + -Werror -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wno-unused-parameter diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/gserver/layers/BilinearInterpLayer.cpp index 9f0c01a838562..f37efc824a2ec 100644 --- a/paddle/gserver/layers/BilinearInterpLayer.cpp +++ b/paddle/gserver/layers/BilinearInterpLayer.cpp @@ -23,7 +23,22 @@ REGISTER_LAYER(bilinear_interp, BilinearInterpLayer); size_t BilinearInterpLayer::getSize() { inImgH_ = inputLayers_[0]->getOutput().getFrameHeight(); inImgW_ = inputLayers_[0]->getOutput().getFrameWidth(); + + const BilinearInterpConfig& conf = config_.inputs(0).bilinear_interp_conf(); + if (inImgH_ == 0) { + inImgH_ = conf.img_size_y(); + } + if (inImgW_ == 0) { + inImgW_ = conf.img_size_x(); + } + + outImgH_ = conf.out_size_y(); + outImgW_ = conf.out_size_x(); + numChannels_ = conf.num_channels(); + + CHECK(outImgH_ > 0 && outImgW_ > 0); CHECK(inImgH_ > 0 && inImgW_ > 0); + CHECK(numChannels_); getOutput().setFrameHeight(outImgH_); getOutput().setFrameWidth(outImgW_); @@ -37,15 +52,6 @@ bool BilinearInterpLayer::init(const LayerMap& layerMap, CHECK_EQ(1, config_.inputs_size()); - const BilinearInterpConfig& conf = config_.inputs(0).bilinear_interp_conf(); - - outImgH_ = conf.out_size_y(); - outImgW_ = conf.out_size_x(); - 
numChannels_ = conf.num_channels(); - - CHECK(outImgH_ > 0 && outImgW_ > 0); - CHECK(numChannels_); - return true; } diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 8ee89992ddf1e..b5b944833e47f 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -40,6 +40,8 @@ TEST(Layer, BilinearInterpLayer) { LayerInputConfig* input = config.layerConfig.add_inputs(); BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf(); + bilinear->set_img_size_x(32); + bilinear->set_img_size_y(32); bilinear->set_out_size_x(64); bilinear->set_out_size_y(64); bilinear->set_num_channels(4); diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 41d081942778e..479b457e55a7d 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -213,10 +213,13 @@ message OperatorConfig { } message BilinearInterpConfig { - // The size if output feature map. - required uint32 out_size_x = 1; - required uint32 out_size_y = 2; - required uint32 num_channels = 3; + // The size of input feature map. + optional uint32 img_size_x = 1; + optional uint32 img_size_y = 2; + // The size of output feature map. + required uint32 out_size_x = 3; + required uint32 out_size_y = 4; + required uint32 num_channels = 5; } message ImageConfig { From d373c10091205e2ec44f1a1781a73a8869166272 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 15:53:22 +0800 Subject: [PATCH 085/180] Fix bilinear interp bug --- cmake/flags.cmake | 6 +++--- paddle/math/Matrix.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index e087770991aef..03407e72d60b2 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -65,9 +65,9 @@ endif() set(COMMON_FLAGS -fPIC -fno-omit-frame-pointer - -Wall - -Wextra - -Werror +# -Wall +# -Wextra +# -Werror -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wno-unused-parameter diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index f1af9536ba5d6..3b11e831fe429 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1275,7 +1275,7 @@ void GpuMatrix::bilinearBackward(const Matrix& out, static_cast(inImgW - 1) / (outImgW - 1) : 0.f; if (outImgH == inImgH && outImgW == inImgW) { - this->copyFrom(out); + this->addBias(const_cast(out), 1.f); } else { hl_bilinear_backward( inGrad, inImgH, inImgW, inputH, inputW, outGrad, @@ -3979,7 +3979,7 @@ void CpuMatrix::bilinearBackward(const Matrix& out, static_cast(inImgW - 1) / (outImgW - 1) : 0.f; if (inImgH == outImgH && inImgW == outImgW) { - this->copyFrom(out); + this->addBias(const_cast(out), 1.f); } else { for (size_t k = 0; k < outputH; ++k) { // loop for batches for (size_t i = 0; i < outImgH; ++i) { // loop for images From 6e12482e4fd7c920349e832e799c14b779ec9e8c Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 15:53:48 +0800 Subject: [PATCH 086/180] revert flags.cmake --- cmake/flags.cmake | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmake/flags.cmake b/cmake/flags.cmake index 03407e72d60b2..e087770991aef 100644 --- a/cmake/flags.cmake +++ b/cmake/flags.cmake @@ -65,9 +65,9 @@ endif() set(COMMON_FLAGS -fPIC -fno-omit-frame-pointer -# -Wall -# -Wextra -# -Werror + -Wall + -Wextra + -Werror -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wno-unused-parameter From 724d6dd40a6af8b0f60ea206e34ca2ba1515eff5 Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 16:08:36 +0800 Subject: [PATCH 087/180] Replace outputH 
to batchSize --- paddle/math/Matrix.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 3b11e831fe429..283733fe845d4 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -3912,7 +3912,7 @@ void CpuMatrix::bilinearForward(const Matrix& in, CHECK(dynamic_cast(&in)); size_t outputW = getWidth(); - size_t outputH = getHeight(); + size_t batchSize = getHeight(); size_t inputW = in.getWidth(); size_t inputH = in.getHeight(); (void)(inputH); @@ -3928,7 +3928,7 @@ void CpuMatrix::bilinearForward(const Matrix& in, if (inImgH == outImgH && inImgW == outImgW) { this->copyFrom(in); } else { - for (size_t k = 0; k < outputH; ++k) { // loop for batches + for (size_t k = 0; k < batchSize; ++k) { // loop for batches for (size_t i = 0; i < outImgH; ++i) { // loop for images size_t h = ratioH * i; size_t hid = (h < inImgH - 1) ? 1 : 0; @@ -3967,7 +3967,7 @@ void CpuMatrix::bilinearBackward(const Matrix& out, size_t inputW = getWidth(); size_t inputH = getHeight(); size_t outputW = out.getWidth(); - size_t outputH = out.getHeight(); + size_t batchSize = out.getHeight(); (void)(inputH); real* inGrad = getData(); @@ -3981,7 +3981,7 @@ void CpuMatrix::bilinearBackward(const Matrix& out, if (inImgH == outImgH && inImgW == outImgW) { this->addBias(const_cast(out), 1.f); } else { - for (size_t k = 0; k < outputH; ++k) { // loop for batches + for (size_t k = 0; k < batchSize; ++k) { // loop for batches for (size_t i = 0; i < outImgH; ++i) { // loop for images size_t h = ratioH * i; size_t hid = (h < inImgH - 1) ? 1 : 0; From 91a4064e35bb30dc73420f8e5f0c4525ac2765ed Mon Sep 17 00:00:00 2001 From: liaogang Date: Mon, 7 Nov 2016 16:17:36 +0800 Subject: [PATCH 088/180] Follow comments --- python/paddle/trainer_config_helpers/layers.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index bf722fb1ba453..038f4d32a588e 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1292,7 +1292,7 @@ def bilinear_interp_layer(input, assert input.layer_type == LayerType.CONV_LAYER assert isinstance(input.activation, LinearActivation) assert out_size_x > 0 and out_size_y > 0 - assert input.numfilters is not None + assert input.num_filters is not None num_channels = input.num_filters Layer(name=name, inputs=Input(input.name, @@ -1301,8 +1301,7 @@ def bilinear_interp_layer(input, num_channels=num_channels)), type=LayerType.BILINEAR_INTERP_LAYER, **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input], - num_filters=num_filters) + return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input]) @wrap_name_default() @layer_support() From f9849ac92d3aa89b8bddbe9f08dd8ae9768132f5 Mon Sep 17 00:00:00 2001 From: gangliao Date: Mon, 7 Nov 2016 00:54:28 -0800 Subject: [PATCH 089/180] Revise one word in ISSUE_TEMPLATE.md (#371) --- ISSUE_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index b70d66dc259af..6b2614b101108 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -7,7 +7,7 @@ Before submitting the issue, look over the following criteria before handing you - [ ] Was there a similar issue submitted or resolved before ? You could search issue in the github. - [ ] Did you retrieve your issue from widespread search engines ? 
- [ ] Is my description of the issue clear enough to reproduce this problem? - * If some errors occured, we need details about `how do you run your code?`, `what system do you use?`, `Are you using GPU or not?`, etc. + * If some errors occurred, we need details about `how do you run your code?`, `what system do you use?`, `Are you using GPU or not?`, etc. * If you use an recording [asciinema](https://asciinema.org/) to show what you are doing to make it happen, that's awesome! We could help you solve the problem more quickly. - [ ] Is my description of the issue use the github markdown correctly? * Please use the proper markdown syntaxes for styling all forms of writing, e.g, source code, error information, etc. From e802471c58068669de78a3eaec143b705cb654b8 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Mon, 7 Nov 2016 19:41:20 +0800 Subject: [PATCH 090/180] abstract outputSize function in CNN-related layers (#314) --- paddle/gserver/layers/ConvBaseLayer.cpp | 9 ++- paddle/gserver/layers/ConvBaseLayer.h | 26 +------ paddle/gserver/layers/ConvOperator.cpp | 72 ++++++------------- paddle/gserver/layers/ConvProjection.h | 12 ++-- paddle/gserver/layers/CudnnPoolLayer.cpp | 20 +++--- paddle/gserver/layers/PoolLayer.h | 11 +-- paddle/gserver/layers/PoolProjectionLayer.cpp | 29 ++++---- paddle/gserver/tests/test_LayerGrad.cpp | 56 +++++++-------- paddle/math/MathUtils.cpp | 19 +++-- paddle/math/MathUtils.h | 16 +++++ python/paddle/trainer/config_parser.py | 53 +++++++------- 11 files changed, 139 insertions(+), 184 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 040510b7ad211..42ff0b70d86f7 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/utils/Logging.h" #include "ConvBaseLayer.h" namespace paddle { @@ -78,10 +77,10 @@ size_t ConvBaseLayer::calOutputSize() { imgSizeH_[i] = config_.inputs(i).conv_conf().img_size(); if (imgSizeW_[i] == 0) imgSizeW_[i] = config_.inputs(i).conv_conf().img_size(); - outputH_.push_back( - outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); - outputW_.push_back( - outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i])); + outputH_.push_back(outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], + strideY_[i], caffeMode_)); + outputW_.push_back(outputSize(imgSizeW_[i], filterSize_[i], padding_[i], + stride_[i], caffeMode_)); CHECK_EQ(outputH_[i], outputH_[0]); CHECK_EQ(outputW_[i], outputW_[0]); } diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index 316514acf1a0d..e660a6d6f50ac 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -16,6 +16,7 @@ limitations under the License. */ #pragma once #include "Layer.h" +#include "paddle/math/MathUtils.h" namespace paddle { /** @@ -87,31 +88,6 @@ class ConvBaseLayer : public Layer { virtual size_t calOutputSize(); Weight& getWeight(int idx) { return *weights_[idx]; } - - /** - * Calculate output size based on caffeMode_. 
- * - input(+padding): 0123456789 - * - imageSize(+padding) = 10; - * - filterSize = 3; - * - stride = 2; - * - caffeMode_ is true: - - output: (012), (234), (456), (678) - - outputSize = 4; - * - caffeMode_ is false: - * - output: (012), (234), (456), (678), (9) - * - outputSize = 5; - */ - int outputSize(int imageSize, int filterSize, int padding, int stride) { - int outputSize; - if (!caffeMode_) { - outputSize = - (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1; - } else { - outputSize = (imageSize - filterSize + 2 * padding) / stride + 1; - } - CHECK_GE(outputSize, 1); - return outputSize; - } }; } // namespace paddle diff --git a/paddle/gserver/layers/ConvOperator.cpp b/paddle/gserver/layers/ConvOperator.cpp index 8c72c1778451d..2d9c892fe595f 100644 --- a/paddle/gserver/layers/ConvOperator.cpp +++ b/paddle/gserver/layers/ConvOperator.cpp @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "paddle/math/Matrix.h" +#include "paddle/math/MathUtils.h" #include "Operator.h" namespace paddle { @@ -35,8 +35,8 @@ class ConvOperator : public Operator { */ virtual ~ConvOperator() { if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpace_); - workSpaceInBytes_ = 0; + hl_free_mem_device(workSpace_); + workSpaceInBytes_ = 0; } hl_destroy_tensor_descriptor(inputDesc_); @@ -83,33 +83,6 @@ class ConvOperator : public Operator { filterSize_ * filterSizeY_ * channels_ * numFilters_); } - /** - * Calculate output size. - */ - int outputSize(int imageSize, int filterSize, int padding, int stride) { - int outputSize; - if (!caffeMode_) { - /* input(+padding): 0123456789 - * imageSize(+padding) = 10; - * filterSize = 3; - * stride = 2; - * output: (012), (234), (456), (678), (9) - * outputSize = 5; - */ - outputSize = - (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1; - } else { - /* input(+padding): 0123456789 - * imageSize(+padding) = 10; - * filterSize = 3; - * stride = 2; - * output: (012), (234), (456), (678) - * outputSize = 4; - */ - outputSize = (imageSize - filterSize + 2 * padding) / stride + 1; - } - return outputSize; - } /// Most of member variables are same with CudnnConvLayer. /// There is no explanation here. 
int imageH_, imageW_, outputH_, outputW_; @@ -129,7 +102,7 @@ class ConvOperator : public Operator { int fwdAlgo_, bwdFilterAlgo_, bwdDataAlgo_; size_t fwdLimitBytes_, bwdDataLimitBytes_, bwdFilterLimitBytes_; size_t workSpaceInBytes_; - void* workSpace_; + void *workSpace_; bool isSelectAlgo_; }; @@ -160,7 +133,7 @@ ConvOperator::ConvOperator(const OperatorConfig &config, bool useGpu) void ConvOperator::allocConvWorkSpace(size_t maxWorkSpace) { if (maxWorkSpace > workSpaceInBytes_) { if (workSpaceInBytes_ != 0) { - hl_free_mem_device(workSpace_); + hl_free_mem_device(workSpace_); } // total amount of storage needed workSpace_ = hl_malloc_device(maxWorkSpace); @@ -168,14 +141,13 @@ void ConvOperator::allocConvWorkSpace(size_t maxWorkSpace) { } } - void ConvOperator::reshape(int batchSize) { imageH_ = ins_[0]->getFrameHeight(); imageW_ = ins_[0]->getFrameWidth(); if (imageH_ == 0) imageH_ = imgSize_; if (imageW_ == 0) imageW_ = imgSize_; - outputH_ = outputSize(imageH_, filterSizeY_, paddingY_, strideY_); - outputW_ = outputSize(imageW_, filterSize_, padding_, stride_); + outputH_ = outputSize(imageH_, filterSizeY_, paddingY_, strideY_, caffeMode_); + outputW_ = outputSize(imageW_, filterSize_, padding_, stride_, caffeMode_); out_->setFrameHeight(outputH_); out_->setFrameWidth(outputW_); @@ -183,10 +155,10 @@ void ConvOperator::reshape(int batchSize) { reshapeImageDescriptors(); if (!isSelectAlgo_) { - hl_conv_workspace(inputDesc_, outputDesc_, filterDesc_, - convDesc_, &fwdAlgo_, &fwdLimitBytes_, - &bwdDataAlgo_, &bwdDataLimitBytes_, - &bwdFilterAlgo_, &bwdFilterLimitBytes_); + hl_conv_workspace(inputDesc_, outputDesc_, filterDesc_, convDesc_, + &fwdAlgo_, &fwdLimitBytes_, &bwdDataAlgo_, + &bwdDataLimitBytes_, &bwdFilterAlgo_, + &bwdFilterLimitBytes_); size_t maxWorkSpace = 0; maxWorkSpace = std::max(fwdLimitBytes_, bwdDataLimitBytes_); @@ -202,7 +174,8 @@ void ConvOperator::computeConvSizes() { hl_create_filter_descriptor(&filterDesc_, channels_, numFilters_, filterSizeY_, filterSize_); hl_create_tensor_descriptor(&inputDesc_); - int outputX = outputSize(imgSize_, filterSize_, padding_, stride_); + int outputX = + outputSize(imgSize_, filterSize_, padding_, stride_, caffeMode_); CHECK_EQ(outputX, outputX_); hl_create_tensor_descriptor(&outputDesc_); hl_create_convolution_descriptor(&convDesc_, inputDesc_, filterDesc_, @@ -211,13 +184,13 @@ void ConvOperator::computeConvSizes() { void ConvOperator::reshapeImageDescriptors() { hl_tensor_reshape(inputDesc_, 1, channels_, imageH_, imageW_, - channels_ * imageH_ * imageW_, imageH_ * imageW_, - imageW_, 1); + channels_ * imageH_ * imageW_, imageH_ * imageW_, imageW_, + 1); hl_tensor_reshape(outputDesc_, 1, numFilters_, outputH_, outputW_, numFilters_ * outputH_ * outputW_, outputH_ * outputW_, outputW_, 1); - hl_reset_convolution_descriptor(convDesc_, inputDesc_, filterDesc_, - paddingY_, padding_, strideY_, stride_); + hl_reset_convolution_descriptor(convDesc_, inputDesc_, filterDesc_, paddingY_, + padding_, strideY_, stride_); inputOffset_ = channels_ * imageH_ * imageW_; outputOffset_ = numFilters_ * outputH_ * outputW_; weightOffset_ = numFilters_ * channels_ * filterSize_ * filterSize_; @@ -273,18 +246,17 @@ void ConvOperator::backward() { real *weightGrad = ins_[1]->grad->getData() + weightOffset_ * batchId; hl_convolution_backward_filter(inputDesc_, inputData, outputDesc_, outGrad, filterDesc_, weightGrad, - convDesc_, workSpace_, - workSpaceInBytes_, bwdFilterAlgo_); + convDesc_, workSpace_, workSpaceInBytes_, + bwdFilterAlgo_); } 
MatrixPtr preGrad = ins_[0]->grad; if (NULL != preGrad) { real *inputGrad = preGrad->getData() + inputOffset_ * batchId; real *wgtData = ins_[1]->value->getData() + weightOffset_ * batchId; - hl_convolution_backward_data(inputDesc_, inputGrad, outputDesc_, - outGrad, filterDesc_, wgtData, - convDesc_, workSpace_, - workSpaceInBytes_, bwdDataAlgo_); + hl_convolution_backward_data( + inputDesc_, inputGrad, outputDesc_, outGrad, filterDesc_, wgtData, + convDesc_, workSpace_, workSpaceInBytes_, bwdDataAlgo_); } } } diff --git a/paddle/gserver/layers/ConvProjection.h b/paddle/gserver/layers/ConvProjection.h index 41a100ac3c50f..d0bfe9a6edba0 100644 --- a/paddle/gserver/layers/ConvProjection.h +++ b/paddle/gserver/layers/ConvProjection.h @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include "Projection.h" +#include "paddle/math/MathUtils.h" namespace paddle { @@ -42,17 +42,15 @@ class ConvProjection : public Projection { void reshapeTensorDesc(int batchSize); void reshape(int batchSize); - int outputSize(int imageSize, int filterSize, int padding, int stride) { - return (imageSize - filterSize + 2 * padding) / stride + 1; - } - size_t calOutputSize() { imageH_ = in_->getFrameHeight(); imageW_ = in_->getFrameWidth(); if (imageH_ == 0) imageH_ = configImgH_; if (imageW_ == 0) imageW_ = configImgW_; - outputH_ = outputSize(imageH_, filterH_, paddingH_, strideH_); - outputW_ = outputSize(imageW_, filterW_, paddingW_, strideW_); + outputH_ = outputSize(imageH_, filterH_, paddingH_, strideH_, + /* caffeMode */ true); + outputW_ = outputSize(imageW_, filterW_, paddingW_, strideW_, + /* caffeMode */ true); const_cast(out_)->setFrameHeight(outputH_); const_cast(out_)->setFrameWidth(outputW_); diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/gserver/layers/CudnnPoolLayer.cpp index 4c733591b3779..24adb50a985ff 100644 --- a/paddle/gserver/layers/CudnnPoolLayer.cpp +++ b/paddle/gserver/layers/CudnnPoolLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" #include "paddle/math/Matrix.h" @@ -62,9 +61,9 @@ bool CudnnPoolLayer::init(const LayerMap &layerMap, strideHeight = strideY_; strideWidth = stride_; - hl_create_pooling_descriptor(&poolingDesc_, mode_, windowHeight, - windowWidth, heightPadding, widthPadding, - strideHeight, strideWidth); + hl_create_pooling_descriptor(&poolingDesc_, mode_, windowHeight, windowWidth, + heightPadding, widthPadding, strideHeight, + strideWidth); return true; } @@ -80,8 +79,10 @@ void CudnnPoolLayer::reshape(int batchSize) { } CHECK_EQ(inputLayers_[0]->getOutput().value->getWidth(), channels_ * imageH_ * imageW_); - outputH_ = outputSize(imageH_, sizeY_, confPaddingY_, strideY_); - outputW_ = outputSize(imageW_, sizeX_, confPadding_, stride_); + outputH_ = outputSize(imageH_, sizeY_, confPaddingY_, strideY_, + /* caffeMode */ false); + outputW_ = + outputSize(imageW_, sizeX_, confPadding_, stride_, /* caffeMode */ false); getOutput().setFrameHeight(outputH_); getOutput().setFrameWidth(outputW_); @@ -99,8 +100,7 @@ void CudnnPoolLayer::forward(PassType passType) { real *inputData = getInputValue(0)->getData(); real *outData = getOutputValue()->getData(); - hl_pooling_forward(inputDesc_, inputData, outputDesc_, outData, - poolingDesc_); + hl_pooling_forward(inputDesc_, inputData, outputDesc_, outData, poolingDesc_); } void CudnnPoolLayer::backward(const UpdateCallback &callback) { @@ -113,8 +113,8 @@ void CudnnPoolLayer::backward(const UpdateCallback &callback) { real *inputGrad = getInputGrad(0)->getData(); real *outData = getOutputValue()->getData(); real *outGrad = getOutputGrad()->getData(); - hl_pooling_backward(inputDesc_, inputData, inputGrad, outputDesc_, - outData, outGrad, poolingDesc_); + hl_pooling_backward(inputDesc_, inputData, inputGrad, outputDesc_, outData, + outGrad, poolingDesc_); } CudnnPoolLayer::~CudnnPoolLayer() { diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/gserver/layers/PoolLayer.h index bde1f5b8dcbfd..e87ad08251dd4 100644 --- a/paddle/gserver/layers/PoolLayer.h +++ b/paddle/gserver/layers/PoolLayer.h @@ -17,6 +17,7 @@ limitations under the License. */ #include "Layer.h" #include "paddle/math/Matrix.h" +#include "paddle/math/MathUtils.h" #include namespace paddle { @@ -47,16 +48,6 @@ class PoolLayer : public Layer { static Layer* create(const LayerConfig& config); virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - - /** - * Calculate output size according window size and padding size. - */ - int outputSize(int imageSize, int windowSize, int padding, int stride) { - int outputSize; - outputSize = - (imageSize - windowSize + 2 * padding + stride - 1) / stride + 1; - return outputSize; - } }; } // namespace paddle diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp index 5a2e9afb6e164..9e8ce778501bb 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/gserver/layers/PoolProjectionLayer.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ - #include "paddle/utils/Logging.h" #include "paddle/utils/Stat.h" #include "PoolProjectionLayer.h" @@ -31,8 +30,10 @@ size_t PoolProjectionLayer::getSize() { imgSizeW_ = imgSize_; } - outputH_ = outputSize(imgSizeH_, sizeY_, confPaddingY_, strideY_); - outputW_ = outputSize(imgSizeW_, sizeX_, confPadding_, stride_); + outputH_ = outputSize(imgSizeH_, sizeY_, confPaddingY_, strideY_, + /* caffeMode */ false); + outputW_ = outputSize(imgSizeW_, sizeX_, confPadding_, stride_, + /* caffeMode */ false); layerSize = outputH_ * outputW_ * channels_; @@ -53,9 +54,9 @@ void MaxPoolProjectionLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); - outV->maxPoolForward(*input, imgSizeH_, imgSizeW_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, confPaddingY_, confPadding_); + outV->maxPoolForward(*input, imgSizeH_, imgSizeW_, channels_, sizeX_, sizeY_, + strideY_, stride_, outputH_, outputW_, confPaddingY_, + confPadding_); } void MaxPoolProjectionLayer::backward(const UpdateCallback& callback) { @@ -72,9 +73,8 @@ void MaxPoolProjectionLayer::backward(const UpdateCallback& callback) { MatrixPtr inputGrad = getInputGrad(0); inputGrad->maxPoolBackward(*inputV, imgSizeH_, imgSizeW_, *outGrad, *outV, - sizeX_, sizeY_, - strideY_, stride_, outputH_, outputW_, 1, 1, - confPaddingY_, confPadding_); + sizeX_, sizeY_, strideY_, stride_, outputH_, + outputW_, 1, 1, confPaddingY_, confPadding_); } void AvgPoolProjectionLayer::forward(PassType passType) { @@ -89,9 +89,9 @@ void AvgPoolProjectionLayer::forward(PassType passType) { MatrixPtr outV = getOutputValue(); - outV->avgPoolForward(*input, imgSizeH_, imgSizeW_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, confPaddingY_, confPadding_); + outV->avgPoolForward(*input, imgSizeH_, imgSizeW_, channels_, sizeX_, sizeY_, + strideY_, stride_, outputH_, outputW_, confPaddingY_, + confPadding_); } void AvgPoolProjectionLayer::backward(const UpdateCallback& callback) { @@ -103,9 +103,8 @@ void AvgPoolProjectionLayer::backward(const UpdateCallback& callback) { /* Do derivation */ MatrixPtr outputGrad = getOutputGrad(); MatrixPtr inputGrad = getInputGrad(0); - inputGrad->avgPoolBackward(*outputGrad, imgSizeH_, imgSizeW_, - sizeX_, sizeY_, strideY_, stride_, - outputH_, outputW_, 1, 1, + inputGrad->avgPoolBackward(*outputGrad, imgSizeH_, imgSizeW_, sizeX_, sizeY_, + strideY_, stride_, outputH_, outputW_, 1, 1, confPaddingY_, confPadding_); } } // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index bf2c2e0499941..5397b952bced8 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include "paddle/gserver/layers/DataLayer.h" #include "ModelConfig.pb.h" #include "paddle/trainer/Trainer.h" +#include "paddle/math/MathUtils.h" #include "TestUtil.h" #include "LayerGradUtil.h" @@ -134,7 +135,6 @@ TEST(Projection, identity) { } } - #ifndef PADDLE_ONLY_CPU TEST(Projection, conv) { const int NUM_FILTERS = 16; @@ -158,21 +158,23 @@ TEST(Projection, conv) { conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_img_size(IMAGE_SIZE); - int outputSize = (2 * conv->padding() + conv->img_size() - - conv->filter_size()) / conv->stride() + 1; - int outputSizeY = (2 * conv->padding_y() + conv->img_size() - - conv->filter_size_y()) / conv->stride_y() + 1; - conv->set_output_x(outputSize); + int output_x = + outputSize(conv->img_size(), conv->filter_size(), conv->padding(), + conv->stride(), /* caffeMode */ true); + int output_y = + outputSize(conv->img_size(), conv->filter_size_y(), conv->padding_y(), + conv->stride_y(), /* caffeMode */ true); + conv->set_output_x(output_x); conf.set_input_size(IMAGE_SIZE * IMAGE_SIZE * CHANNELS); - conf.set_output_size(outputSize * outputSizeY * NUM_FILTERS); + conf.set_output_size(output_x * output_y * NUM_FILTERS); - testProjectionGrad(conf, INPUT_DATA, + testProjectionGrad( + conf, INPUT_DATA, /* parameterSize */ NUM_FILTERS * CHANNELS * FILTER_SIZE * FILTER_SIZE_Y, /* batchSize */ 100, true, false, NUM_FILTERS, true); } #endif - TEST(Layer, concat) { TestConfig config; config.biasSize = 0; @@ -293,10 +295,9 @@ void testConvLayer(const string& type, bool trans, bool useGpu) { conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_img_size(16); - conv->set_output_x( - (2 * conv->padding() + conv->img_size() - conv->filter_size()) / - ((float)conv->stride()) + - 1.5); + conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(), + conv->padding(), conv->stride(), + /* caffeMode */ true)); config.layerConfig.set_size(conv->output_x() * conv->output_x() * config.layerConfig.num_filters()); @@ -329,15 +330,13 @@ TEST(Layer, blockExpandLayer) { blockExpand->set_stride_x(2); blockExpand->set_stride_y(2); blockExpand->set_output_x( - 1 + - (2 * blockExpand->padding_x() + blockExpand->img_size_x() - - blockExpand->block_x() + blockExpand->stride_x() - 1) / - blockExpand->stride_x()); + outputSize(blockExpand->img_size_x(), blockExpand->block_x(), + blockExpand->padding_x(), blockExpand->stride_x(), + /* caffeMode */ false)); blockExpand->set_output_y( - 1 + - (2 * blockExpand->padding_y() + blockExpand->img_size_y() - - blockExpand->block_y() + blockExpand->stride_y() - 1) / - blockExpand->stride_y()); + outputSize(blockExpand->img_size_y(), blockExpand->block_y(), + blockExpand->padding_y(), blockExpand->stride_y(), + /* caffeMode */ false)); config.layerConfig.set_size(blockExpand->block_x() * blockExpand->block_y() * blockExpand->channels()); @@ -862,8 +861,8 @@ void setPoolConfig(TestConfig* config, PoolConfig* pool, pool->set_stride(sw); pool->set_stride_y(sh); - int ow = (pool->img_size() - kw + 2 * pw + sw - 1) / sw + 1; - int oh = (pool->img_size_y() - kh + 2 * ph + sh - 1) / sh + 1; + int ow = outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false); + int oh = outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false); pool->set_output_x(ow); pool->set_output_y(oh); } @@ -1255,12 +1254,11 @@ TEST(Operator, conv) { conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_img_size(IMAGE_SIZE); - int outputSize = 
- int(1.0 * (2 * conv->padding() + conv->img_size() - conv->filter_size()) / - conv->stride()) + - 1; - conv->set_output_x(outputSize); - config.layerConfig.set_size(outputSize * outputSize * + int output_x = + outputSize(conv->img_size(), conv->filter_size(), conv->padding(), + conv->stride(), /* caffeMode */ true); + conv->set_output_x(output_x); + config.layerConfig.set_size(output_x * output_x * config.layerConfig.num_filters()); config.layerConfig.set_size(conv->output_x() * conv->output_x() * NUM_FILTERS); diff --git a/paddle/math/MathUtils.cpp b/paddle/math/MathUtils.cpp index 5b78ab1b07bda..c1af8628d03c5 100644 --- a/paddle/math/MathUtils.cpp +++ b/paddle/math/MathUtils.cpp @@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #include "MathUtils.h" #include #include "paddle/utils/Logging.h" @@ -24,11 +23,7 @@ namespace paddle { * major is rows and minor is cols, according to * major value to initialize minor value" */ -void sparseRand(int* major, - int* minor, - int nnz, - int majorLen, - int minorMax, +void sparseRand(int* major, int* minor, int nnz, int majorLen, int minorMax, bool useGpu) { CHECK(size_t(nnz) > size_t(1)); int* cpuMajor; @@ -72,5 +67,17 @@ void sparseRand(int* major, } } +int outputSize(int imageSize, int filterSize, int padding, int stride, + bool caffeMode) { + int outputSize; + if (!caffeMode) { + outputSize = + (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1; + } else { + outputSize = (imageSize - filterSize + 2 * padding) / stride + 1; + } + CHECK_GE(outputSize, 1); + return outputSize; +} } // namespace paddle diff --git a/paddle/math/MathUtils.h b/paddle/math/MathUtils.h index 83375022abbe2..49d0c10a8f5e4 100644 --- a/paddle/math/MathUtils.h +++ b/paddle/math/MathUtils.h @@ -44,4 +44,20 @@ namespace paddle { void sparseRand(int* major, int* minor, int nnz, int majorLen, int minorMax, bool useGpu); +/** + * Calculate output size based on caffeMode_. + * - input(+padding): 0123456789 + * - imageSize(+padding) = 10; + * - filterSize = 3; + * - stride = 2; + * - caffeMode is true: + - output: (012), (234), (456), (678) + - outputSize = 4; + * - caffeMode is false: + * - output: (012), (234), (456), (678), (9) + * - outputSize = 5; + */ +int outputSize(int imageSize, int filterSize, int padding, int stride, + bool caffeMode); + } // namespace paddle diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index e9038fea8a208..73631602a92be 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1006,6 +1006,17 @@ def TestData(data_config, async_load_data=None): " Data definition") g_config.test_data_config.async_load_data = async_load_data +''' +caffe_mode: compute the output size using floor instead of ceil, + which is consistent of caffe and CuDNN's convention. 
+''' +def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode): + output = (2 * padding + img_size - filter_size) / float(stride) + if caffe_mode: + return 1 + int(math.floor(output)) + else: + return 1 + int(math.ceil(output)) + def parse_pool(pool, input_layer_name, pool_conf): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in ['max-projection', 'avg-projection', @@ -1036,12 +1047,10 @@ def parse_pool(pool, input_layer_name, pool_conf): if pool.padding is not None: pool_conf.padding = pool.padding pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) - pool_conf.output_x = int(math.ceil((pool_conf.img_size + \ - 2*pool_conf.padding - pool_conf.size_x) / \ - float(pool_conf.stride))) + 1 - pool_conf.output_y = int(math.ceil((pool_conf.img_size_y + \ - 2*pool_conf.padding_y - pool_conf.size_y) / \ - float(pool_conf.stride_y))) + 1 + pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x, + pool_conf.padding, pool_conf.stride, False) + pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y, + pool_conf.padding_y, pool_conf.stride_y, False) def parse_image(image, input_layer_name, image_conf): image_conf.channels = image.channels @@ -1072,10 +1081,7 @@ def parse_norm(norm, input_layer_name, norm_conf): norm_conf.scale /= norm.size else: norm_conf.scale /= norm.size ** 2 -''' -caffe_mode: compute the output size using floor instead of ceil, - which is consistent of caffe and CuDNN's convention. -''' + def parse_conv(conv, input_layer_name, conv_conf): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y @@ -1096,14 +1102,9 @@ def parse_conv(conv, input_layer_name, conv_conf): ("Input layer %s: Incorrect input image size %d for input " + "image pixels %d") % (input_layer_name, conv_conf.img_size, img_pixels)) - if conv.caffe_mode: - conv_conf.output_x = \ - 1 + int(math.floor((2 * conv.padding + conv_conf.img_size \ - - conv.filter_size) / float(conv.stride))) - else: - conv_conf.output_x = \ - 1 + int(math.ceil((2 * conv.padding + conv_conf.img_size \ - - conv.filter_size) / float(conv.stride))) + conv_conf.output_x = cnn_output_size(conv_conf.img_size, conv_conf.filter_size, + conv_conf.padding, conv_conf.stride, + conv_conf.caffe_mode) def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.channels = block_expand.channels @@ -1118,18 +1119,16 @@ def parse_block_expand(block_expand, input_layer_name, block_expand_conf): if block_expand_conf.img_size_x == 0: block_expand_conf.output_x = 0 else: - block_expand_conf.output_x = \ - 1 + \ - int(math.ceil((2 * block_expand.padding_x + block_expand.img_size_x \ - - block_expand.block_x) / float(block_expand.stride_x))) + block_expand_conf.output_x = cnn_output_size( + block_expand.img_size_x, block_expand.block_x, + block_expand.padding_x, block_expand.stride_x, False) if block_expand_conf.img_size_y == 0: - block_expand_conf.output_y = 0 + block_expand_conf.output_y = 0 else: - block_expand_conf.output_y = \ - 1 + \ - int(math.ceil((2 * block_expand.padding_y + block_expand.img_size_y \ - - block_expand.block_y) / float(block_expand.stride_y))) + block_expand_conf.output_y = cnn_output_size( + block_expand.img_size_y, block_expand.block_y, + block_expand.padding_y, block_expand.stride_y, False) def parse_maxout(maxout, input_layer_name, maxout_conf): maxout_conf.channels = maxout.channels From 4905751a22e5211defafcc56d16a26114e61ca25 Mon Sep 17 00:00:00 2001 From: lzhao4ever Date: Mon, 7 Nov 
2016 14:09:20 -0800 Subject: [PATCH 091/180] Add define for double getrf, getri (#381) --- paddle/cuda/src/hl_cuda_cublas.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/cuda/src/hl_cuda_cublas.cc index 724ea490e8ea9..f16376ec937d3 100644 --- a/paddle/cuda/src/hl_cuda_cublas.cc +++ b/paddle/cuda/src/hl_cuda_cublas.cc @@ -78,6 +78,8 @@ DYNAMIC_LOAD_CUBLAS_WRAP(cublasCgemmBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasZgemmBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasSgetrfBatched) DYNAMIC_LOAD_CUBLAS_WRAP(cublasSgetriBatched) +DYNAMIC_LOAD_CUBLAS_WRAP(cublasDgetrfBatched) +DYNAMIC_LOAD_CUBLAS_WRAP(cublasDgetriBatched) CUBLAS_BLAS_ROUTINE_EACH(DYNAMIC_LOAD_CUBLAS_V2_WRAP) #undef DYNAMIC_LOAD_CUBLAS_WRAP From ebad8e525d711230b63cd0112a55cac3f6cc751a Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Fri, 4 Nov 2016 13:04:28 -0700 Subject: [PATCH 092/180] Add SumCost This allows user to implement any type of cost by summing over the output of non-cost layers. Change-Id: Ic55aaabbf0c1299e70b8e48a0effcc91f8f5bd29 --- paddle/gserver/layers/CostLayer.cpp | 29 +++++++++++++++ paddle/gserver/tests/test_LayerGrad.cpp | 13 +++++++ python/paddle/trainer/config_parser.py | 1 + .../paddle/trainer_config_helpers/layers.py | 37 ++++++++++++++++--- .../tests/configs/test_cost_layers.py | 6 ++- 5 files changed, 79 insertions(+), 7 deletions(-) diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index 14ff8510f7b19..0bb8359a904c8 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp @@ -562,4 +562,33 @@ void HuberTwoClass::backwardImpIn( } } +class SumCostLayer : public Layer { +public: + explicit SumCostLayer(const LayerConfig& config) : Layer(config) {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) { + bool ret = Layer::init(layerMap, parameterMap); + if (!ret) return ret; + CHECK_EQ(inputLayers_.size(), 1UL); + return true; + } + + virtual void forward(PassType passType) { + Layer::forward(passType); + const MatrixPtr& input = getInputValue(0); + + /* malloc memory for the output_ if necessary */ + int batchSize = input->getHeight(); + int size = 1; + resizeOutput(batchSize, size); + output_.value->sumRows(*input); + } + + virtual void backward(const UpdateCallback& callback = nullptr) { + getInputGrad(0)->add((real)1); + } +}; + +REGISTER_LAYER(sum_cost, SumCostLayer); + } // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 5397b952bced8..61b89f5ec3c5c 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -935,6 +935,19 @@ TEST(Layer, rankCostLayer) { } } +TEST(Layer, sumCostLayer) { + TestConfig config; + config.layerConfig.set_type("sum_cost"); + config.biasSize = 0; + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 1, 0}); + config.layerConfig.add_inputs(); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "sum_cost", 100, false, useGpu); + } +} + TEST(Layer, weightedRankCostLayer) { TestConfig config; config.layerConfig.set_type("rank-cost"); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 73631602a92be..5a0d5018f0d11 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1795,6 +1795,7 @@ def init(cls, name, inputs, device=None, coeff=1.): define_cost('MultiBinaryLabelCrossEntropy', 'multi_binary_label_cross_entropy') 
define_cost('SoftBinaryClassCrossEntropy', 'soft_binary_class_cross_entropy') define_cost('HuberTwoClass', 'huber') +define_cost('SumCost', 'sum_cost') @config_layer('hsigmoid') class HierarchicalSigmoidLayer(LayerBase): diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 49f0ff3289db7..c768a419c0bff 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -52,7 +52,7 @@ 'convex_comb_layer', 'ctc_layer', 'crf_layer', 'crf_decoding_layer', 'nce_layer', 'cross_entropy_with_selfnorm', 'cross_entropy', - 'multi_binary_label_cross_entropy', + 'multi_binary_label_cross_entropy', 'sum_cost', 'rank_cost', 'lambda_cost', 'huber_cost', 'block_expand_layer', 'maxout_layer', 'out_prod_layer', 'print_layer' @@ -126,6 +126,7 @@ class LayerType(object): CROSS_ENTROPY_WITH_SELFNORM = "multi_class_cross_entropy_with_selfnorm" SOFT_BIN_CLASS_CROSS_ENTROPY = "soft_binary_class_cross_entropy" MULTI_BIN_LABEL_CROSS_ENTROPY = "multi_binary_label_cross_entropy" + SUM_COST = "sum_cost" @staticmethod def is_layer_type(type_name): @@ -3924,8 +3925,6 @@ def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None): :type input: LayerOutput. :param label: The input label. :type input: LayerOutput. - :param type: The type of cost. - :type type: basestring. :param name: The name of this layers. It is not necessary. :type name: None|basestring. :param coeff: The coefficient affects the gradient in the backward. @@ -3961,8 +3960,6 @@ def cross_entropy_with_selfnorm(input, label, name=None, coeff=1.0, :type input: LayerOutput. :param label: The input label. :type input: LayerOutput. - :param type: The type of cost. - :type type: basestring. :param name: The name of this layers. It is not necessary. :type name: None|basestring. :param coeff: The coefficient affects the gradient in the backward. @@ -3987,6 +3984,36 @@ def cross_entropy_with_selfnorm(input, label, name=None, coeff=1.0, parents=[input, label]) +@wrap_name_default() +@layer_support() +def sum_cost(input, name=None, layer_attr=None): + """ + A loss layer which calculate the sum of the input as loss + + .. code-block:: python + + cost = sum_cost(input) + + :param input: The first input layer. + :type input: LayerOutput. + :param name: The name of this layers. It is not necessary. + :type name: None|basestring. + :param layer_attr: Extra Layer Attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. + :rtype: LayerOutput. 
+ """ + Layer(name=name, + type=LayerType.SUM_COST, + inputs=[input.name], + **ExtraLayerAttribute.to_kwargs(layer_attr) + ) + + return LayerOutput(name, + LayerType.SUM_COST, + parents=[input]) + + @wrap_name_default() @layer_support() def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None): diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py index 64b45f4ded10b..f1b3365f84e3e 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py @@ -11,8 +11,9 @@ probs = data_layer(name='probs', size=10) xe_label = data_layer(name='xe-label', size=10) +hidden = fc_layer(input=seq_in, size=4) outputs(ctc_layer(input=seq_in, label=labels), - crf_layer(input=fc_layer(input=seq_in, size=4), + crf_layer(input=hidden, label=data_layer(name='crf_label', size=4)), rank_cost(left=data_layer(name='left', size=1), right=data_layer(name='right', size=1), @@ -23,4 +24,5 @@ cross_entropy_with_selfnorm(input=probs, label=xe_label), huber_cost(input=data_layer(name='huber_probs', size=1), label=data_layer(name='huber_label', size=1)), - multi_binary_label_cross_entropy(input=probs, label=xe_label)) + multi_binary_label_cross_entropy(input=probs, label=xe_label), + sum_cost(hidden)) From 38764bf908578dae66d2d4ce6bb5a2380f3c8cb4 Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Mon, 7 Nov 2016 14:28:23 -0800 Subject: [PATCH 093/180] Add sum_cost to document And rebase Change-Id: I7ea234b3aa8fc70675af15d91db08242c43fb5ff --- doc/source/gserver/layers/layer.rst | 5 +++ doc/ui/api/trainer_config_helpers/layers.rst | 6 +++ paddle/gserver/layers/CostLayer.cpp | 6 +++ paddle/gserver/layers/CostLayer.h | 2 +- .../protostr/test_cost_layers.protostr | 37 +++++++++++++------ 5 files changed, 43 insertions(+), 13 deletions(-) diff --git a/doc/source/gserver/layers/layer.rst b/doc/source/gserver/layers/layer.rst index 807b22ca140ee..4b8e149505f06 100644 --- a/doc/source/gserver/layers/layer.rst +++ b/doc/source/gserver/layers/layer.rst @@ -465,6 +465,11 @@ SumOfSquaresCostLayer .. doxygenclass:: paddle::SumOfSquaresCostLayer :members: +SumCostLayer +````````````````````` +.. doxygenclass:: paddle::SumCostLayer + :members: + CosSimLayer ----------- .. doxygenclass:: paddle::CosSimLayer diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index 5bb88b0615c12..c2e347d12b3f8 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -395,6 +395,12 @@ hsigmoid :members: hsigmoid :noindex: +sum_cost +--------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: sum_cost + :noindex: + Check Layer ============ diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index 0bb8359a904c8..949788be49787 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp @@ -562,6 +562,12 @@ void HuberTwoClass::backwardImpIn( } } +/** + * This cost layer compute the sum of its input as loss. 
+ * \f[ + * o(i) = \sum_{j=1}^D y_{ij} + * \f] + */ class SumCostLayer : public Layer { public: explicit SumCostLayer(const LayerConfig& config) : Layer(config) {} diff --git a/paddle/gserver/layers/CostLayer.h b/paddle/gserver/layers/CostLayer.h index b464e16737ae5..f263c688213ae 100644 --- a/paddle/gserver/layers/CostLayer.h +++ b/paddle/gserver/layers/CostLayer.h @@ -129,7 +129,7 @@ class SoftBinaryClassCrossEntropy : public CostLayer { * This cost layer compute Euclidean (L2) loss for real-valued regression * tasks. * \f[ - * L = \frac{1}{2N} \sum_{i=1}^N {|| \hat{y}_i - y_i||_2^2} + * L = \sum_{i=1}^N {|| \hat{y}_i - y_i||_2^2} * \f] */ class SumOfSquaresCostLayer : public CostLayer { diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr index 5261cf0c44943..f6045fe1f6825 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_cost_layers.protostr @@ -23,6 +23,17 @@ layers { size: 10 active_type: "" } +layers { + name: "__fc_layer_0__" + type: "fc" + size: 4 + active_type: "tanh" + inputs { + input_layer_name: "input" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} layers { name: "__ctc_layer_0__" type: "ctc" @@ -36,17 +47,6 @@ layers { } norm_by_times: false } -layers { - name: "__fc_layer_0__" - type: "fc" - size: 4 - active_type: "tanh" - inputs { - input_layer_name: "input" - input_parameter_name: "___fc_layer_0__.w0" - } - bias_parameter_name: "___fc_layer_0__.wbias" -} layers { name: "crf_label" type: "data" @@ -191,6 +191,16 @@ layers { } coeff: 1.0 } +layers { + name: "__sum_cost_0__" + type: "sum_cost" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + coeff: 1.0 +} parameters { name: "___fc_layer_0__.w0" size: 800 @@ -241,14 +251,15 @@ output_layer_names: "__cross_entropy_0__" output_layer_names: "__cross_entropy_with_selfnorm_0__" output_layer_names: "__huber_cost_0__" output_layer_names: "__multi_binary_label_cross_entropy_0__" +output_layer_names: "__sum_cost_0__" sub_models { name: "root" layer_names: "input" layer_names: "labels" layer_names: "probs" layer_names: "xe-label" - layer_names: "__ctc_layer_0__" layer_names: "__fc_layer_0__" + layer_names: "__ctc_layer_0__" layer_names: "crf_label" layer_names: "__crf_layer_0__" layer_names: "left" @@ -264,6 +275,7 @@ sub_models { layer_names: "huber_label" layer_names: "__huber_cost_0__" layer_names: "__multi_binary_label_cross_entropy_0__" + layer_names: "__sum_cost_0__" input_layer_names: "input" input_layer_names: "labels" input_layer_names: "crf_label" @@ -284,6 +296,7 @@ sub_models { output_layer_names: "__cross_entropy_with_selfnorm_0__" output_layer_names: "__huber_cost_0__" output_layer_names: "__multi_binary_label_cross_entropy_0__" + output_layer_names: "__sum_cost_0__" is_recurrent_layer_group: false } From 2e1d968fc284fa733e6ed20191875b003116c083 Mon Sep 17 00:00:00 2001 From: gangliao Date: Mon, 7 Nov 2016 19:05:37 -0800 Subject: [PATCH 094/180] Remove Mac OS X build docs (#386) Currently, Paddle on Mac OS X is not deliberate testing through the different versions of Mac OS X and Clang. When all these things that we've done, we will reopen Mac build docs. 
--- doc/build/build_from_source.md | 119 --------------------------------- 1 file changed, 119 deletions(-) diff --git a/doc/build/build_from_source.md b/doc/build/build_from_source.md index c37234d3ef14d..b8f26f431eb7a 100644 --- a/doc/build/build_from_source.md +++ b/doc/build/build_from_source.md @@ -4,7 +4,6 @@ Installing from Sources * [1. Download and Setup](#download) * [2. Requirements](#requirements) * [3. Build on Ubuntu](#ubuntu) -* [4. Build on Mac OS X](#mac) ## Download and Setup You can download PaddlePaddle from the [github source](https://github.com/gangliao/Paddle). @@ -191,121 +190,3 @@ sudo pip install /opt/paddle/share/wheels/*.whl # or just run sudo paddle version ``` - -## Building on Mac OS X - -### Prerequisites -This guide is based on Mac OS X 10.11 (El Capitan). Note that if you are running an up to date version of OS X, -you will already have Python 2.7.10 and Numpy 1.8 installed. - -The best option is to use the package manager homebrew to handle installations and upgrades for you. -To install [homebrew](http://brew.sh/), first open a terminal window (you can find Terminal in the Utilities folder in Applications), and issue the command: - -```bash -# install brew -/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -# install pip -easy_install pip -``` - -### Install Dependencies - -- **CPU Dependencies** - - ```bash - # Install fundamental dependents - brew install glog gflags cmake protobuf openblas - - # Install google test on Mac OS X - # Download gtest 1.7.0 - wget https://github.com/google/googletest/archive/release-1.7.0.tar.gz - tar -xzf googletest-release-1.7.0.tar.gz && cd googletest-release-1.7.0 - # Build gtest - mkdir build && cd build && cmake .. && make - # Install gtest library - sudo cp -r ../include/gtest /usr/local/include/ - sudo cp lib*.a /usr/local/lib - ``` - -- **GPU Dependencies(optional)** - - To build GPU version, you will need the following installed: - - 1. a CUDA-capable GPU - 2. Mac OS X 10.11 or later - 2. the Clang compiler and toolchain installed using Xcode - 3. NVIDIA CUDA Toolkit (available at http://developer.nvidia.com/cuda-downloads) - 4. NVIDIA cuDNN Library (availabel at https://developer.nvidia.com/cudnn) - - The CUDA development environment relies on tight integration with the host development environment, - including the host compiler and C runtime libraries, and is therefore only supported on - distribution versions that have been qualified for this CUDA Toolkit release. - - 1. After downloading cuDNN library, issue the following commands: - - ```bash - sudo tar -xzf cudnn-7.5-osx-x64-v5.0-ga.tgz -C /usr/local - sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib/libcudnn* - ``` - 2. Then you need to set DYLD\_LIBRARY\_PATH, PATH environment variables in ~/.bashrc. - - ```bash - export DYLD_LIBRARY_PATH=/usr/local/cuda/lib:$DYLD_LIBRARY_PATH - export PATH=/usr/local/cuda/bin:$PATH - ``` - -### Build and Install - -As usual, the best option is to create build folder under paddle project directory. - -```bash -mkdir build && cd build -cmake .. -``` - -CMake first check PaddlePaddle's dependencies in system default path. After installing some optional -libraries, corresponding build option will be set automatically (for instance, glog, gtest and gflags). -If still not found, you can manually set it based on CMake error information from your screen. - -As a simple example, consider the following: - -- **Only CPU** - - ```bash - cmake .. 
-DWITH_GPU=OFF - ``` -- **GPU** - - ```bash - cmake .. -DWITH_GPU=ON - ``` - -- **GPU with doc and swig** - - ```bash - cmake .. -DWITH_GPU=ON -DWITH_DOC=ON -DWITH_SWIG_PY=ON - ``` - -Finally, you can build PaddlePaddle: - -```bash -# you can add build option here, such as: -cmake .. -DWITH_GPU=ON -DCMAKE_INSTALL_PREFIX= -# please use sudo make install, if you want to install PaddlePaddle into the system -make -j `sysctl -n hw.ncpu` && make install -# set PaddlePaddle installation path in ~/.bashrc -export PATH=/bin:$PATH -``` -**Note:** - -If you set `WITH_SWIG_PY=ON`, related python dependencies also need to be installed. -Otherwise, PaddlePaddle will automatically install python dependencies -at first time when user run paddle commands, such as `paddle version`, `paddle train`. -It may require sudo privileges: - -```bash -# you can run -sudo pip install /opt/paddle/share/wheels/*.whl -# or just run -sudo paddle version -``` From 5ece5c96ada7a14099408f072abefd213b08bbce Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 8 Nov 2016 03:36:02 +0000 Subject: [PATCH 095/180] add python wrap for sppLayer --- doc/ui/api/trainer_config_helpers/layers.rst | 12 ++++ paddle/gserver/layers/PoolProjection.cpp | 2 +- .../layers/SpatialPyramidPoolLayer.cpp | 6 +- .../gserver/layers/SpatialPyramidPoolLayer.h | 1 - paddle/gserver/tests/test_LayerGrad.cpp | 2 + paddle/math/Matrix.cpp | 14 +++-- proto/ModelConfig.proto.m4 | 6 +- python/paddle/trainer/config_parser.py | 46 +++++++++++++- .../paddle/trainer_config_helpers/layers.py | 61 ++++++++++++++++++- .../tests/configs/check.md5 | 1 + .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/test_spp_layer.py | 17 ++++++ 12 files changed, 155 insertions(+), 15 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index 5bb88b0615c12..a7cf57d01799d 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -46,6 +46,12 @@ conv_operator :members: conv_operator :noindex: +conv_projection +------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: conv_projection + :noindex: + conv_shift_layer ------------------ .. automodule:: paddle.trainer_config_helpers.layers @@ -71,6 +77,12 @@ img_pool_layer -------------- .. automodule:: paddle.trainer_config_helpers.layers :members: img_pool_layer + :noindex: + +spp_layer +-------------- +.. automodule:: paddle.trainer_config_helpers.layers + :members: spp_layer :noindex: maxout_layer diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index 468ca6f1b7d2d..e10788e926470 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -16,7 +16,7 @@ limitations under the License. 
*/ namespace paddle { -REGISTER_PROJECTION_CREATE_FUNC(pool2, &PoolProjection::create); +REGISTER_PROJECTION_CREATE_FUNC(pool, &PoolProjection::create); PoolProjection* PoolProjection::create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) { diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp index bcdba5c151175..83334a59882ac 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp @@ -24,7 +24,7 @@ ProjectionConfig SpatialPyramidPoolLayer::getConfig(size_t imgSizeW, size_t pyramidLevel, std::string& poolType) { ProjectionConfig config; - config.set_type("pool2"); + config.set_type("pool"); PoolConfig* conf = config.mutable_pool_conf(); conf->set_channels(channels); conf->set_img_size(imgSizeW); @@ -93,7 +93,7 @@ bool SpatialPyramidPoolLayer::init(const LayerMap& layerMap, startCol = endCol; projInput_.emplace_back(Argument()); } - outputSize_ = endCol; + CHECK_EQ(endCol, getSize()); return true; } @@ -101,7 +101,7 @@ void SpatialPyramidPoolLayer::forward(PassType passType) { Layer::forward(passType); int batchSize = getInput(0).getBatchSize(); - resetOutput(batchSize, outputSize_); + resetOutput(batchSize, getSize()); for (size_t i = 0; i < pyramidHeight_; i++) { size_t startCol = projCol_[i].first; size_t endCol = projCol_[i].second; diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/gserver/layers/SpatialPyramidPoolLayer.h index de1fd4da07dd8..156581530a1bc 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.h +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.h @@ -27,7 +27,6 @@ class SpatialPyramidPoolLayer : public Layer { size_t imgSizeW_; size_t imgSizeH_; size_t pyramidHeight_; - size_t outputSize_; std::string poolType_; std::vector> poolProjections_; diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index c3597f56070ef..595e20354ad5b 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -931,6 +931,8 @@ void testSppLayer(const string& poolType, const int pyramidHeight, bool trans, sppConfig->set_channels(16); sppConfig->set_img_size(10); sppConfig->set_img_size_y(20); + int outputSize = (std::pow(4, sppConfig->pyramid_height()) - 1) / (4 - 1); + config.layerConfig.set_size(outputSize * sppConfig->channels()); testLayerGrad(config, "spp", 100, trans, useGpu); } diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 721c3de59fbcd..607334aaa934b 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1510,18 +1510,19 @@ void CpuMatrix::maxPoolForward(Matrix& inputMat, size_t imgSizeH, CHECK(inHeight * inWidth == inputMat.getWidth() / channels); CHECK_EQ(num, this->getHeight()); CHECK_EQ(channels * outputH * outputW, this->getWidth()); + size_t outStride = getStride(); /* initialize the data_ */ for (size_t i = 0; i < height_; i++) { for (size_t j = 0; j < width_; j++) { - outData[i * getStride() + j] = -(real)FLT_MAX; + outData[i * outStride + j] = -(real)FLT_MAX; } } /* pool max one by one */ for (size_t n = 0; n < num; ++n) { // frame by frame if (!isContiguous()) { - outData = data_ + n * getStride(); + outData = data_ + n * outStride; } for (size_t c = 0; c < channels; ++c) { // channel by channel for (size_t ph = 0; ph < outputH; ++ph) { @@ -1564,10 +1565,15 @@ void CpuMatrix::maxPoolBackward(Matrix& image, size_t imgSizeH, size_t imgSizeW, real* inData = image.getData(); real* otData = 
outV.getData(); real* otGrad = outGrad.getData(); + + size_t outStride = outV.getStride(); + real* origOutData = otData; + real* origOutGrad = otGrad; + for (size_t n = 0; n < num; ++n) { if (!outV.isContiguous()) { - otData = outV.getData() + n * outV.getStride(); - otGrad = outGrad.getData() + n * outGrad.getStride(); + otData = origOutData + n * outStride; + otGrad = origOutGrad + n * outStride; } for (size_t c = 0; c < channels; ++c) { for (size_t ph = 0; ph < outputH; ++ph) { diff --git a/proto/ModelConfig.proto.m4 b/proto/ModelConfig.proto.m4 index 4ef1550105492..a247f6f3e7ed4 100644 --- a/proto/ModelConfig.proto.m4 +++ b/proto/ModelConfig.proto.m4 @@ -202,11 +202,11 @@ message ProjectionConfig { optional ConvConfig conv_conf = 8; optional int32 num_filters = 9; - // For pool - optional PoolConfig pool_conf = 10; - // For IdentityOffsetProjection optional uint64 offset = 11 [default = 0]; + + // For pool + optional PoolConfig pool_conf = 12; } message OperatorConfig { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index e9098943165fd..7ad2b7fd5ce83 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -470,6 +470,7 @@ def __init__( image=None, block_expand=None, maxout=None, + spp=None, format=None, nnz=None, is_static=None, @@ -669,7 +670,6 @@ def calc_bias_size(self): def calc_parameter_dims(self, input_size, output_size): return None - # Define a operator for mixed layer @config_class class Operator(Cfg): @@ -783,6 +783,15 @@ def __init__( padding_y = None): self.add_keys(locals()) +class SpatialPyramidPool(Cfg): + def __init__( + self, + pool_type, + pyramid_height, + channels, + img_width = None): + self.add_keys(locals()) + # please refer to the comments in proto/ModelConfig.proto @config_class class Norm(Cfg): @@ -1043,6 +1052,22 @@ def parse_pool(pool, input_layer_name, pool_conf): 2*pool_conf.padding_y - pool_conf.size_y) / \ float(pool_conf.stride_y))) + 1 +def parse_spp(spp, input_layer_name, spp_conf): + spp_conf.pool_type = spp.pool_type + config_assert(spp.pool_type in ['max-projection', 'avg-projection'], + "pool-type %s is not in " "['max-projection', 'avg-projection']" + % spp.pool_type) + spp_conf.pyramid_height = spp.pyramid_height + spp_conf.channels = spp.channels + + img_pixels = g_layer_map[input_layer_name].size / spp_conf.channels + + spp_conf.img_size = default(spp.img_width, int(img_pixels ** 0.5)) + spp_conf.img_size_y = img_pixels / spp_conf.img_size + config_assert(spp_conf.img_size * spp_conf.img_size_y == img_pixels, + "Incorrect input image size %d for input image pixels %d" + % (spp_conf.img_size, img_pixels)) + def parse_image(image, input_layer_name, image_conf): image_conf.channels = image.channels image_pixels = g_layer_map[input_layer_name].size / image_conf.channels @@ -1649,6 +1674,25 @@ def __init__( name, pool_conf.output_y, pool_conf.output_x)) self.set_layer_size((pool_conf.output_x * pool_conf.output_y) * pool_conf.channels) +@config_layer('spp') +class SpatialPyramidPoolLayer(LayerBase): + def __init__( + self, + name, + inputs, + device=None): + super(SpatialPyramidPoolLayer, self).__init__(name, 'spp', 0, inputs=inputs, device=device) + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + parse_spp( + self.inputs[input_index].spp, + input_layer.name, + self.config.inputs[input_index].spp_conf) + spp_conf = self.config.inputs[input_index].spp_conf + output_size = (pow(4, spp_conf.pyramid_height) - 1) / 
(4 - 1) + print("output size for %s is %d " % (name, output_size)) + self.set_layer_size(output_size * spp_conf.channels) + @config_layer('batch_norm') class BatchNormLayer(LayerBase): layer_type = 'batch_norm' diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 9a23c02431d18..03243c03b021b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -55,7 +55,8 @@ 'multi_binary_label_cross_entropy', 'rank_cost', 'lambda_cost', 'huber_cost', 'block_expand_layer', - 'maxout_layer', 'out_prod_layer', 'print_layer' + 'maxout_layer', 'out_prod_layer', 'print_layer', + 'spp_layer', ] @@ -111,6 +112,7 @@ class LayerType(object): LINEAR_COMBINATION_LAYER = "convex_comb" BLOCK_EXPAND = "blockexpand" MAXOUT = "maxout" + SPP_LAYER = "spp" PRINT_LAYER = "print" @@ -868,6 +870,7 @@ def pooling_layer(input, pooling_type=None, name=None, bias_attr=None, size=input.size) + @wrap_bias_attr_default() @wrap_param_attr_default() @wrap_act_default(param_names=['gate_act'], @@ -1708,6 +1711,62 @@ def img_pool_layer(input, pool_size, name=None, num_filters=num_channels) +@wrap_name_default("spp") +@layer_support() +def spp_layer(input, name=None, num_channels=None, pool_type=None, + pyramid_height=None, img_width=None, layer_attr=None): + pass + """ + Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. + The details please refer to + `Kaiming He's paper `_. + + :param name: layer name. + :type name: basestring + :param input: layer's input. + :type input: LayerOutput + :param num_channels: number of input channel. + :type num_channels: int + :param pool_type: Pooling type. MaxPooling or AveragePooling. Default is MaxPooling. + :type scale: BasePoolingType + :param pyramid_height: pyramid height. + :type pyramid_height: int + :param img_width: the width of input feature map. If it is None, the input feature + map should be square. + :type img_width: int|None + :param layer_attr: Extra Layer Attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. 
+ :rtype: LayerOutput + """ + if num_channels is None: + assert input.num_filters is not None + num_channels = input.num_filters + + if pool_type is None: + pool_type = MaxPooling() + elif isinstance(pool_type, AvgPooling): + pool_type.name = 'avg' + + type_name = pool_type.name + if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)): + type_name += '-projection' + + Layer( + name=name, + type=LayerType.SPP_LAYER, + inputs=Input(input.name, + spp=SpatialPyramidPool(pool_type=type_name, + channels=num_channels, + pyramid_height=pyramid_height, + img_width=img_width) + ), + **ExtraLayerAttribute.to_kwargs(layer_attr) + ) + return LayerOutput(name, LayerType.SPP_LAYER, parents=[input], + num_filters=num_channels) + + def __img_norm_layer__(name, input, size, norm_type, scale, power, num_channels, blocked, layer_attr): if num_channels is None: diff --git a/python/paddle/trainer_config_helpers/tests/configs/check.md5 b/python/paddle/trainer_config_helpers/tests/configs/check.md5 index 72dfdad7bdd40..bf0512420ed3c 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/check.md5 +++ b/python/paddle/trainer_config_helpers/tests/configs/check.md5 @@ -20,3 +20,4 @@ fded24727338fb8ce44d9951ed8aea08 test_rnn_group.protostr 67d6fde3afb54f389d0ce4ff14726fe1 test_sequence_pooling.protostr f586a548ef4350ba1ed47a81859a64cb unused_layers.protostr f937a5a6e7e8864b4d8cf56b0f7c7f44 util_layers.protostr +60c9a71e19bd4b2a1253712799d0ae70 test_spp_layer.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 6a31ceabdf36d..6102c614de3b3 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -9,7 +9,7 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -test_maxout test_bi_grumemory) +test_maxout test_bi_grumemory test_spp_layer) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py new file mode 100644 index 0000000000000..6786c27639ea8 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py @@ -0,0 +1,17 @@ +from paddle.trainer_config_helpers import * + +settings( + batch_size=100, + learning_rate=1e-5 +) + +data = data_layer(name='data', size=3200) + + +spp = spp_layer(input=data, + pyramid_height=2, + num_channels=16, + pool_type=MaxPooling(), + img_width=10) + +outputs(spp) From 098e7caa23bb18a512b5809d1128225e4e12ede6 Mon Sep 17 00:00:00 2001 From: gangliao Date: Mon, 7 Nov 2016 19:37:48 -0800 Subject: [PATCH 096/180] Cancelling Travis build with docs updates only. 
(#372) --- .travis.yml | 10 ++++++++++ README.md | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 7812ac0283789..b5a00bb2ca0ec 100644 --- a/.travis.yml +++ b/.travis.yml @@ -39,6 +39,16 @@ addons: - lcov - graphviz before_install: + - | + if [ ${JOB} == "BUILD_AND_TEST" ]; then + if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then + TRAVIS_COMMIT_RANGE="FETCH_HEAD...$TRAVIS_BRANCH" + fi + git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' || { + echo "Only markdown docs were updated, stopping build process." + exit + } + fi - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - pip install wheel protobuf sphinx breathe recommonmark diff --git a/README.md b/README.md index 66767d7ff8e4a..81ff8c7122ab8 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ developed by Baidu scientists and engineers for the purpose of applying deep learning to many products at Baidu. Our vision is to enable deep learning for everyone via PaddlePaddle. -Please refer to our [release log](https://github.com/baidu/Paddle/releases) to track the latest feature of PaddlePaddle. +Please refer to our [release announcement](https://github.com/baidu/Paddle/releases) to track the latest feature of PaddlePaddle. ## Features From 8b5cb2955c3b656077a1d2c1ed8f7ec309b50472 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Tue, 8 Nov 2016 12:57:21 +0800 Subject: [PATCH 097/180] fix deadlink in Chinese quick start doc. (#389) --- doc_cn/demo/quick_start/index.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc_cn/demo/quick_start/index.md b/doc_cn/demo/quick_start/index.md index aa6b66ca8c024..4d9b24ba851a7 100644 --- a/doc_cn/demo/quick_start/index.md +++ b/doc_cn/demo/quick_start/index.md @@ -134,9 +134,8 @@ define_py_data_sources2(train_list='data/train.list', * obj="process": 指定生成数据的函数 * args={"dictionary": word_dict}: 额外的参数,这里指定词典 -更详细用例请参考文档Python Use Case, -数据格式和详细文档请参考 -PyDataProviderWrapper。 +更详细数据格式和用例请参考 +PyDataProvider2。 ## 网络结构(Network Architecture) 本节我们将专注于网络结构的介绍。 From a275fe93aae77553bd833d995855d0da5f6a508f Mon Sep 17 00:00:00 2001 From: luotao1 Date: Tue, 8 Nov 2016 13:26:22 +0800 Subject: [PATCH 098/180] add python-related unittest problem in faq document (#377) --- doc_cn/faq/index.rst | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/doc_cn/faq/index.rst b/doc_cn/faq/index.rst index db28b4436fe5e..3eb0e10ae2228 100644 --- a/doc_cn/faq/index.rst +++ b/doc_cn/faq/index.rst @@ -177,3 +177,40 @@ PaddlePaddle的参数使用名字 :code:`name` 作为参数的ID,相同名字 pip install --upgrade pip +8. python相关的单元测试都过不了 +-------------------------------- + +如果出现以下python相关的单元测试都过不了的情况: + +.. code-block:: bash + + 24 - test_PyDataProvider (Failed) + 26 - test_RecurrentGradientMachine (Failed) + 27 - test_NetworkCompare (Failed) + 28 - test_PyDataProvider2 (Failed) + 32 - test_Prediction (Failed) + 33 - test_Compare (Failed) + 34 - test_Trainer (Failed) + 35 - test_TrainerOnePass (Failed) + 36 - test_CompareTwoNets (Failed) + 37 - test_CompareTwoOpts (Failed) + 38 - test_CompareSparse (Failed) + 39 - test_recurrent_machine_generation (Failed) + 40 - test_PyDataProviderWrapper (Failed) + 41 - test_config_parser (Failed) + 42 - test_swig_api (Failed) + 43 - layers_test (Failed) + +并且查询PaddlePaddle单元测试的日志,提示: + +.. code-block:: bash + + paddle package is already in your PYTHONPATH. 
But unittest need a clean environment. + Please uninstall paddle package before start unittest. Try to 'pip uninstall paddle'. + +解决办法是:卸载paddle包 :code:`pip uninstall paddle`。 + +原因是:单元测试使用了一个旧版本的python包,而没有测试到代码中实际修改的python包。即单元测试需要一个干净的环境: + +* 如果paddle包已经在python的site-packages里面了,那么单元测试时使用的paddle包,就是site-packages里面的python包,而不是源码目录里 :code:`/python` 目录下的python包。 +* 即便设置了 :code:`PYTHONPATH` 到 :code:`/python` 也没用,因为python的搜索路径是优先已经安装的python包。 \ No newline at end of file From 56b23d1838daf3757e7f6f5cac8abf3f295a2c15 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Mon, 7 Nov 2016 23:44:33 -0600 Subject: [PATCH 099/180] Fix macOS quick start preprocess script. (#390) * Use `gshuf` instead of `shuf` in macOS * Fix #388 --- demo/quick_start/preprocess.sh | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/demo/quick_start/preprocess.sh b/demo/quick_start/preprocess.sh index fe2acbbd74898..58a72147c5e41 100755 --- a/demo/quick_start/preprocess.sh +++ b/demo/quick_start/preprocess.sh @@ -21,14 +21,21 @@ set -e export LC_ALL=C +UNAME_STR=`uname` + +if [[ ${UNAME_STR} == 'Linux' ]]; then + SHUF_PROG='shuf' +else + SHUF_PROG='gshuf' +fi mkdir -p data/tmp python preprocess.py -i data/reviews_Electronics_5.json.gz # uniq and shuffle cd data/tmp echo 'uniq and shuffle...' -cat pos_*|sort|uniq|shuf> pos.shuffed -cat neg_*|sort|uniq|shuf> neg.shuffed +cat pos_*|sort|uniq|${SHUF_PROG}> pos.shuffed +cat neg_*|sort|uniq|${SHUF_PROG}> neg.shuffed min_len=`sed -n '$=' neg.shuffed` test_num=$((min_len/10)) @@ -42,8 +49,8 @@ head -n$train_num neg.shuffed >train.neg tail -n$test_num pos.shuffed >test.pos tail -n$test_num neg.shuffed >test.neg -cat train.pos train.neg|shuf>../train.txt -cat test.pos test.neg|shuf>../test.txt +cat train.pos train.neg | ${SHUF_PROG} >../train.txt +cat test.pos test.neg | ${SHUF_PROG} >../test.txt cd - echo 'data/train.txt' > data/train.list From a07da94939695986e2c15858f61199ca60c37fec Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 8 Nov 2016 14:19:56 +0800 Subject: [PATCH 100/180] fix floating-point overflow problem of tanh (#355) --- paddle/cuda/include/hl_base.h | 9 ++ paddle/cuda/src/hl_avx_functions.cc | 2 + paddle/cuda/src/hl_cpu_functions.cc | 4 +- paddle/gserver/tests/test_LayerGrad.cpp | 2 +- paddle/gserver/tests/test_RecurrentLayer.cpp | 2 +- paddle/math/BaseMatrix.cu | 5 +- paddle/math/MathFunctions.cpp | 5 +- paddle/math/Matrix.cpp | 9 -- paddle/math/tests/CMakeLists.txt | 1 + paddle/math/tests/test_FPException.cpp | 94 ++++++++++++++++++++ 10 files changed, 119 insertions(+), 14 deletions(-) create mode 100644 paddle/math/tests/test_FPException.cpp diff --git a/paddle/cuda/include/hl_base.h b/paddle/cuda/include/hl_base.h index 1fe2774cc5a29..02fa6bc3ace32 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/cuda/include/hl_base.h @@ -209,6 +209,15 @@ typedef struct { #define HL_FLOAT_MIN 2.2250738585072014e-308 #endif + +/** + * The maximum input value for exp, used to avoid overflow problem. + * + * Currently only used for tanh function. + */ +#define EXP_MAX_INPUT 40.0 + + /** * @brief DIVUP(x, y) is similar to ceil(x / y). 
* @note For CUDA, DIVUP will be used to specify diff --git a/paddle/cuda/src/hl_avx_functions.cc b/paddle/cuda/src/hl_avx_functions.cc index 2d471206f61f2..08976180fff5b 100644 --- a/paddle/cuda/src/hl_avx_functions.cc +++ b/paddle/cuda/src/hl_avx_functions.cc @@ -38,7 +38,9 @@ namespace hppl { } __m256 tanh(const __m256 a) { + __m256 max = _mm256_set1_ps(EXP_MAX_INPUT); __m256 tmp = _mm256_mul_ps(_mm256_set1_ps(-2.0f), a); + tmp = _mm256_min_ps(tmp, max); tmp = exp(tmp); return _mm256_sub_ps( _mm256_div_ps(_mm256_set1_ps(2.0f), diff --git a/paddle/cuda/src/hl_cpu_functions.cc b/paddle/cuda/src/hl_cpu_functions.cc index 3fd6b278d0537..b8352c2d537fb 100644 --- a/paddle/cuda/src/hl_cpu_functions.cc +++ b/paddle/cuda/src/hl_cpu_functions.cc @@ -30,7 +30,9 @@ namespace hppl { } real tanh(const real a) { - return (2.0 / (1.0 + exp(-2.0*a))) - 1.0; + real tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + return (2.0 / (1.0 + exp(tmp))) - 1.0; } real linear(const real a) { diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 5397b952bced8..4e01fa91ed2ba 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -995,7 +995,7 @@ TEST(Layer, LstmLayer) { TestConfig config; config.layerConfig.set_type("lstmemory"); config.layerConfig.set_size(4); - config.layerConfig.set_active_type("sigmoid"); + config.layerConfig.set_active_type("tanh"); config.layerConfig.set_active_state_type("sigmoid"); config.layerConfig.set_active_gate_type("sigmoid"); config.biasSize = 28; diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/gserver/tests/test_RecurrentLayer.cpp index 9b933b153d158..1c8497e8c526f 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/gserver/tests/test_RecurrentLayer.cpp @@ -369,7 +369,7 @@ TEST(Layer, LstmLayer) { LayerConfig layerConfig; layerConfig.set_type("lstmemory"); layerConfig.set_active_type("relu"); - layerConfig.set_active_state_type("sigmoid"); + layerConfig.set_active_state_type("tanh"); layerConfig.set_active_gate_type("sigmoid"); layerConfig.add_inputs(); diff --git a/paddle/math/BaseMatrix.cu b/paddle/math/BaseMatrix.cu index 8b888b1ee5e46..d81b99e544158 100644 --- a/paddle/math/BaseMatrix.cu +++ b/paddle/math/BaseMatrix.cu @@ -625,7 +625,10 @@ void BaseMatrixT::squareDerivative(BaseMatrixT& b) { applyBinary(binary::SquareDerivative(), b); } -DEFINE_MATRIX_BINARY_OP(Tanh, b = 2.0 / (1.0 + exp(-2 * a)) - 1.0); +DEFINE_MATRIX_BINARY_OP(Tanh, + T tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; + b = 2.0 / (1.0 + std::exp(tmp)) - 1.0); template<> void BaseMatrixT::tanh(BaseMatrixT& b) { applyBinary(binary::Tanh(), b); diff --git a/paddle/math/MathFunctions.cpp b/paddle/math/MathFunctions.cpp index f8132066477db..e0b2a2bb5b2cd 100644 --- a/paddle/math/MathFunctions.cpp +++ b/paddle/math/MathFunctions.cpp @@ -200,7 +200,10 @@ void vLog1p(const int n, const T* a, T* r) { binary::vLog1p(), const_cast(a), r, 1, n, n, n); } -DEFINE_MATRIX_BINARY_OP(vTanh, b = 2.0 / (1.0 + std::exp(-2 * a)) - 1.0); +DEFINE_MATRIX_BINARY_OP(vTanh, + T tmp = -2.0 * a; + tmp = (tmp > EXP_MAX_INPUT) ? 
EXP_MAX_INPUT : tmp; + b = 2.0 / (1.0 + std::exp(tmp)) - 1.0); template void vTanh(const int n, const T* a, T* r) { hl_cpu_apply_binary_op, 0, 0>( diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index d901ba93492ac..4fc9b2d089366 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -3471,9 +3471,7 @@ void CpuMatrix::tanh(Matrix& output) { size_t dim = getWidth(); CHECK_EQ(output.getHeight(), numSamples); CHECK_EQ(output.getWidth(), dim); - errno = 0; vTanh(numSamples * dim, getData(), output.getData()); - CHECK_EQ(errno, 0) << "vTanh error"; } void CpuMatrix::tanhDerivative(Matrix& output) { @@ -3495,10 +3493,8 @@ void CpuMatrix::softrelu(Matrix& output) { out[j] = x; } } - errno = 0; vExp(numSamples * dim, output.getData(), output.getData()); vLog1p(numSamples * dim, output.getData(), output.getData()); - CHECK_EQ(errno, 0) << "vExp+vLog1p error"; } void CpuMatrix::softreluDerivative(Matrix& output) { @@ -3513,9 +3509,7 @@ void CpuMatrix::softreluDerivative(Matrix& output) { MatrixPtr tmpMat = Matrix::create(numSamples, dim); real* tmp = tmpMat->getData(); - errno = 0; vExp(size, output.getData(), tmpMat->getData()); - CHECK_EQ(errno, 0) << "vExp error"; for (size_t i = 0; i < size; ++i) { grad[i] *= (1.0 - 1.0 / tmp[i]); @@ -3538,10 +3532,7 @@ void CpuMatrix::scaledTanh(Matrix& output, real p1, real p2) { out[i] = p2 * in[i]; } - // out = tanh(out) - errno = 0; vTanh(numSamples * dim, out, out); - CHECK_EQ(errno, 0) << "vTanh error"; // out = p1 * out for (size_t i = 0; i < numSamples * dim; ++i) { diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/math/tests/CMakeLists.txt index eb72f11e1c653..247be983ba329 100644 --- a/paddle/math/tests/CMakeLists.txt +++ b/paddle/math/tests/CMakeLists.txt @@ -13,3 +13,4 @@ add_simple_unittest(test_sparseMatrixCompare) add_simple_unittest(test_perturbation) add_simple_unittest(test_CpuGpuVector) add_simple_unittest(test_Allocator) +add_simple_unittest(test_FPException) diff --git a/paddle/math/tests/test_FPException.cpp b/paddle/math/tests/test_FPException.cpp new file mode 100644 index 0000000000000..174278c2aaac4 --- /dev/null +++ b/paddle/math/tests/test_FPException.cpp @@ -0,0 +1,94 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +/** + * This test is about floating point calculation exception. + * Paddle catches FE_INVALID, FE DIVBYZERO and FE_OVERFLOW exceptions. + * + * Some exceptions occur in the middle of a set of formulas, + * that can be circumvented by some tricks. + * For example, + * calculate tanh + * b = 2.0 / (1.0 + exp(-2 * a)) - 1.0 + * + * If the result of (-2 * a) is too large, + * a FE_OVERFLOW exception occurs when calculating exp. + * But the result of tanh is no overflow problem, + * so we can add some tricks to prevent exp calculate an excessive value. 
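+ * For example (assuming real is single precision float), take a = -90,
+ * one of the illegal values exercised below: exp(-2 * a) = exp(180) is
+ * roughly 1.5e78, far beyond the float maximum of about 3.4e38, so the
+ * bare formula raises FE_OVERFLOW even though tanh(-90) is simply -1.
+ * Clamping the exponent at EXP_MAX_INPUT = 40 gives exp(40), roughly
+ * 2.4e17, and 2.0 / (1.0 + 2.4e17) - 1.0 is -1 within single precision,
+ * so the clamped formula still produces the correct tanh value.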
+ * + */ +#include +#include +#include "paddle/math/Matrix.h" +#include "paddle/utils/Excepts.h" + +using namespace paddle; // NOLINT + +void SetTensorValue(Matrix& matrix, real value) { + int height = matrix.getHeight(); + int width = matrix.getWidth(); + int stride = matrix.getStride(); + real* data = matrix.getData(); + for (int i = 0; i < height; i++) { + int j = rand() % width; // NOLINT + if (typeid(matrix) == typeid(CpuMatrix)) { + data[i * stride + j] = value; + } else if (typeid(matrix) == typeid(GpuMatrix)) { + hl_memcpy(&data[i * stride + j], &value, sizeof(real)); + } else { + LOG(FATAL) << "should not reach here"; + } + } +} + +template +void testTanh(real illegal) { + MatrixPtr A = std::make_shared(10, 10); + MatrixPtr B = std::make_shared(10, 10); + A->randomizeUniform(); + B->randomizeUniform(); + + SetTensorValue(*A, illegal); + + A->tanh(*B); +} + +template +void testSigmoid(real illegal) { + MatrixPtr A = std::make_shared(10, 10); + MatrixPtr B = std::make_shared(10, 10); + A->randomizeUniform(); + B->randomizeUniform(); + + SetTensorValue(*A, illegal); + + A->sigmoid(*B); +} + +TEST(fp, overflow) { + for (auto illegal : {-90.0, 90.0}) { + LOG(INFO) << " illegal=" << illegal; + testTanh(illegal); + testSigmoid(illegal); + } +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + initMain(argc, argv); + + feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW); + return RUN_ALL_TESTS(); +} From f06f4dfbaee1696b0d789693095b09babf003d90 Mon Sep 17 00:00:00 2001 From: backyes Date: Tue, 8 Nov 2016 14:52:37 +0800 Subject: [PATCH 101/180] py_paddle link zlib(#393) --- paddle/api/paddle_api_config.py.in | 1 + paddle/api/paddle_ld_flags.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in index 6531e5ccb3dba..27e71c86605b6 100644 --- a/paddle/api/paddle_api_config.py.in +++ b/paddle/api/paddle_api_config.py.in @@ -1,6 +1,7 @@ PADDLE_BUILD_DIR="@CMAKE_CURRENT_BINARY_DIR@/../" WITH_GPU="@WITH_GPU@" PROTOBUF_LIB="@PROTOBUF_LIBRARY@" +ZLIB_LIB="@ZLIB_LIBRARIES@" CMAKE_THREAD_LIB="@CMAKE_THREAD_LIBS_INIT@" CMAKE_DL_LIBS="@CMAKE_DL_LIBS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index bc1afc5898e82..e51f65bcf341b 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -38,6 +38,7 @@ def __init__(self): self.paddle_build_dir = os.path.abspath(self.paddle_build_dir) self.with_gpu = PaddleLDFlag.cmake_bool(WITH_GPU) self.protolib = PROTOBUF_LIB + self.zlib = ZLIB_LIB self.thread = CMAKE_THREAD_LIB self.dl_libs = CMAKE_DL_LIBS self.with_python = PaddleLDFlag.cmake_bool(WITH_PYTHON) @@ -64,7 +65,7 @@ def libs_dir_str(self): def parent_dir_str(self): libdirs = PARENT_LIB_DIRS - return " ".join(map(lambda x: "-L" + os.path.join(self.paddle_build_dir, '..', x), + return " ".join(map(lambda x: "-L" + os.path.join(self.paddle_build_dir, '..', x), libdirs)) def libs_str(self): @@ -82,6 +83,7 @@ def libs_str(self): "-lpaddle_cuda", "-lpaddle_api", self.normalize_flag(self.protolib), + self.normalize_flag(self.zlib), self.normalize_flag(self.thread), self.normalize_flag(self.dl_libs), self.normalize_flag(self.cblas_libs), From 57bc6238d93b50632a644620b62fdb81698b5eb7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Tue, 8 Nov 2016 01:35:21 -0600 Subject: [PATCH 102/180] enable swig unittest in travis-ci (#394) * Init * Add numpy deps * Refine --- .travis.yml | 3 ++- paddle/api/paddle_api_config.py.in | 1 + 
paddle/api/paddle_ld_flags.py | 11 ++++++++++- paddle/scripts/travis/build_and_test.sh | 2 ++ paddle/setup.py.in | 13 ++++++++++--- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index b5a00bb2ca0ec..74aa767febeb1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,6 +38,7 @@ addons: - curl - lcov - graphviz + - swig before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then @@ -51,7 +52,7 @@ before_install: fi - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi - - pip install wheel protobuf sphinx breathe recommonmark + - pip install wheel protobuf sphinx breathe recommonmark virtualenv numpy script: - paddle/scripts/travis/main.sh notifications: diff --git a/paddle/api/paddle_api_config.py.in b/paddle/api/paddle_api_config.py.in index 27e71c86605b6..a2352250c31ef 100644 --- a/paddle/api/paddle_api_config.py.in +++ b/paddle/api/paddle_api_config.py.in @@ -16,3 +16,4 @@ GFLAGS_LOCATION="@GFLAGS_LOCATION@" CBLAS_LIBRARIES="@CBLAS_LIBS@" CUDA_LIBRARIES="@CUDA_LIBRARIES@" +WITH_COVERALLS="@ON_COVERALLS@" diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index e51f65bcf341b..05d741f8859ba 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -48,6 +48,7 @@ def __init__(self): self.glog_libs = LIBGLOG_LIBRARY self.with_gflags = PaddleLDFlag.cmake_bool(WITH_GFLAGS) + self.with_coverage = PaddleLDFlag.cmake_bool(WITH_COVERALLS) self.gflags_libs = GFLAGS_LIBRARIES self.gflags_location = GFLAGS_LOCATION self.cblas_libs = CBLAS_LIBRARIES @@ -97,6 +98,8 @@ def libs_str(self): libs.append(self.normalize_flag(self.gflags_libs)) if self.with_gpu: libs.append(self.normalize_flag(self.curt)) + if self.with_coverage: + libs.append("-fprofile-arcs") return " ".join(filter(lambda l: len(l) != 0, libs)) def normalize_flag(self, cmake_flag): @@ -133,8 +136,14 @@ def cmake_bool(cmake_str): return False else: return True - + def c_flag(self): + if self.with_coverage: + return ["-fprofile-arcs", "-ftest-coverage", "-O0", "-g"] + else: + return None except ImportError: class PaddleLDFlag(object): def ldflag_str(self): pass + def c_flag(self): + pass diff --git a/paddle/scripts/travis/build_and_test.sh b/paddle/scripts/travis/build_and_test.sh index 54e3320c8c158..242fd982aa001 100755 --- a/paddle/scripts/travis/build_and_test.sh +++ b/paddle/scripts/travis/build_and_test.sh @@ -3,6 +3,8 @@ source ./common.sh CMAKE_EXTRA="" if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then CMAKE_EXTRA="-DPYTHON_LIBRARY=/usr/local/Cellar/python/2.7.12_1/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config/libpython2.7.dylib" +else + CMAKE_EXTRA="-DWITH_SWIG_PY=ON" fi diff --git a/paddle/setup.py.in b/paddle/setup.py.in index 3341dd6f95969..1a15eafd5528a 100644 --- a/paddle/setup.py.in +++ b/paddle/setup.py.in @@ -31,8 +31,8 @@ is_lin = (system == 'linux') # because generate paddle LDFLAGS is too complicated to do in setup.py # it just read COMAKE generated LDFLAGS. extra_links = [] -ldflags = api.paddle_ld_flags.PaddleLDFlag() -ldflags = ldflags.ldflag_str() +obj = api.paddle_ld_flags.PaddleLDFlag() +ldflags = obj.ldflag_str() if ldflags is not None: extra_links.extend(ldflags.split(" ")) @@ -51,13 +51,20 @@ elif is_osx == True: include_dirs = [np.get_include(), "../"] # include numpy and paddle. 
+extra_c = obj.c_flag() + +attr=dict() +if extra_c is not None: + attr["extra_compile_args"] = extra_c + setup(name="py_paddle", version="@PADDLE_VERSION@", ext_modules=[ Extension('py_paddle._swig_paddle', # Build SWIG Extension. ['Paddle_wrap.cxx'], include_dirs = include_dirs, - extra_link_args = extra_links + extra_link_args = extra_links, + **attr ) ], packages=['py_paddle'], From 125c19a3c31a6290d7feea3faf41dd20fb97e2f4 Mon Sep 17 00:00:00 2001 From: hedaoyuan Date: Tue, 8 Nov 2016 15:35:57 +0800 Subject: [PATCH 103/180] fix some nvcc compile options (#392) --- CMakeLists.txt | 15 +++++++++++++++ cmake/util.cmake | 8 -------- paddle/gserver/CMakeLists.txt | 2 +- paddle/math/CMakeLists.txt | 2 +- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 282e3e199ef44..39f876bc9ee4b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -95,11 +95,26 @@ if(NOT WITH_GPU) add_definitions(-DHPPL_STUB_FUNC) list(APPEND CMAKE_CXX_SOURCE_FILE_EXTENSIONS cu) else() + if(${CUDA_VERSION_MAJOR} GREATER 6) + if(COMPILER_SUPPORT_CXX11) + LIST(APPEND CUDA_NVCC_FLAGS -std=c++11) + endif() + endif() + # TODO(yuyang18): Change it to remove std=c++11 in cuda compile. set(CUDA_PROPAGATE_HOST_FLAGS OFF) if(NOT CUDNN_FOUND) message(FATAL_ERROR "Paddle need cudnn to compile") endif() + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-g -O3 --use_fast_math") + + if(WITH_AVX) + if(AVX_FOUND) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler -mavx") + endif(AVX_FOUND) + else(WITH_AVX) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler -msse3") + endif(WITH_AVX) if(WITH_DSO) set(CUDA_LIBRARIES "") diff --git a/cmake/util.cmake b/cmake/util.cmake index 3f78cd08c3905..a8282f07184c3 100644 --- a/cmake/util.cmake +++ b/cmake/util.cmake @@ -188,14 +188,6 @@ macro(add_simple_unittest TARGET_NAME) add_unittest(${TARGET_NAME} ${TARGET_NAME}.cpp) endmacro() -macro(add_paddle_culib TARGET_NAME) - set(NVCC_FLAG ${CUDA_NVCC_FLAGS}) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};--use_fast_math) - cuda_add_library(${TARGET_NAME} STATIC ${ARGN}) - set(CUDA_NVCC_FLAGS ${NVCC_FLAG}) -endmacro() - - # Creates C resources file from files in given resource file function(create_resources res_file output) # Create empty output file diff --git a/paddle/gserver/CMakeLists.txt b/paddle/gserver/CMakeLists.txt index 9ac4d210f6d37..a066f80c221ee 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/gserver/CMakeLists.txt @@ -50,7 +50,7 @@ if(NOT WITH_PYTHON) endif() if(WITH_GPU) - add_paddle_culib(paddle_gserver ${GSERVER_SOURCES}) + cuda_add_library(paddle_gserver ${GSERVER_SOURCES}) else() add_library(paddle_gserver STATIC ${GSERVER_SOURCES}) diff --git a/paddle/math/CMakeLists.txt b/paddle/math/CMakeLists.txt index db305812a7c03..93b1bf46a1007 100644 --- a/paddle/math/CMakeLists.txt +++ b/paddle/math/CMakeLists.txt @@ -23,7 +23,7 @@ if(NOT WITH_GPU) add_library(paddle_math STATIC ${MATH_SOURCES}) else() - add_paddle_culib(paddle_math ${MATH_SOURCES}) + cuda_add_library(paddle_math ${MATH_SOURCES}) endif() From db1757556e0712ab74b23c5f048768c952bf59a9 Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 8 Nov 2016 17:20:02 +0800 Subject: [PATCH 104/180] Follow comments --- paddle/cuda/src/hl_cuda_cnn.cu | 16 ++--- paddle/gserver/layers/BilinearInterpLayer.cpp | 9 ++- paddle/gserver/layers/BilinearInterpLayer.h | 1 + paddle/gserver/tests/test_LayerGrad.cpp | 10 +++ paddle/math/Matrix.cpp | 69 ++++++++----------- paddle/math/Matrix.h | 24 +++++-- paddle/math/tests/test_matrixCompare.cpp | 11 +-- 
.../paddle/trainer_config_helpers/layers.py | 17 +++-- 8 files changed, 88 insertions(+), 69 deletions(-) diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/cuda/src/hl_cuda_cnn.cu index 49c09334e086d..9eec44f77f27a 100644 --- a/paddle/cuda/src/hl_cuda_cnn.cu +++ b/paddle/cuda/src/hl_cuda_cnn.cu @@ -532,8 +532,7 @@ void hl_CMRNorm_backward(size_t frameCnt, const real* inV, CHECK_SYNC("hl_CMRNorm_backward"); } -__global__ void KeBilinearInterpFw(const size_t nthreads, - const real* in, +__global__ void KeBilinearInterpFw(const real* in, const size_t inImgH, const size_t inImgW, const size_t inputH, @@ -546,6 +545,7 @@ __global__ void KeBilinearInterpFw(const size_t nthreads, const size_t numChannels, const real ratioH, const real ratioW) { + int nthreads = outputH * outputW; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < nthreads) { int outIdH = tid / outputW; @@ -593,13 +593,12 @@ void hl_bilinear_forward(const real* inData, int blocks = (threadNum + 1024 - 1) / 1024; KeBilinearInterpFw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( - threadNum, inData, inImgH, inImgW, inputH, inputW, outData, - outImgH, outImgW, outputH, outputW, numChannels, ratioH, ratioW); + inData, inImgH, inImgW, inputH, inputW, outData, outImgH, + outImgW, outputH, outputW, numChannels, ratioH, ratioW); CHECK_SYNC("hl_bilinear_forward failed"); } -__global__ void KeBilinearInterpBw(const size_t nthreads, - real* in, +__global__ void KeBilinearInterpBw(real* in, const size_t inImgH, const size_t inImgW, const size_t inputH, @@ -612,6 +611,7 @@ __global__ void KeBilinearInterpBw(const size_t nthreads, const size_t numChannels, const real ratioH, const real ratioW) { + int nthreads = outputH * outputW; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < nthreads) { int outIdH = tid / outputW; @@ -659,8 +659,8 @@ void hl_bilinear_backward(real* inGrad, int blocks = (threadNum + 1024 - 1) / 1024; KeBilinearInterpBw<<< blocks, 1024, 0, STREAM_DEFAULT>>>( - threadNum, inGrad, inImgH, inImgW, inputH, inputW, outGrad, - outImgH, outImgW, outputH, outputW, numChannels, ratioH, ratioW); + inGrad, inImgH, inImgW, inputH, inputW, outGrad, outImgH, + outImgW, outputH, outputW, numChannels, ratioH, ratioW); CHECK_SYNC("hl_bilinear_backward failed"); } diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/gserver/layers/BilinearInterpLayer.cpp index f37efc824a2ec..ac5f87be7af07 100644 --- a/paddle/gserver/layers/BilinearInterpLayer.cpp +++ b/paddle/gserver/layers/BilinearInterpLayer.cpp @@ -40,6 +40,11 @@ size_t BilinearInterpLayer::getSize() { CHECK(inImgH_ > 0 && inImgW_ > 0); CHECK(numChannels_); + ratioH_ = (outImgH_ > 1) ? + static_cast(inImgH_ - 1) / (outImgH_ - 1) : 0.f; + ratioW_ = (outImgW_ > 1) ? 
+ static_cast(inImgW_ - 1) / (outImgW_ - 1) : 0.f; + getOutput().setFrameHeight(outImgH_); getOutput().setFrameWidth(outImgW_); return outImgH_ * outImgW_ * numChannels_; @@ -70,7 +75,7 @@ void BilinearInterpLayer::forward(PassType passType) { { REGISTER_TIMER_INFO("FwBilinearInterpTimer", getName().c_str()); outV->bilinearForward(*inV, inImgH_, inImgW_, outImgH_, outImgW_, - numChannels_); + numChannels_, ratioH_, ratioW_); } } @@ -83,7 +88,7 @@ void BilinearInterpLayer::backward(const UpdateCallback& callback) { REGISTER_TIMER_INFO("BwBilinearInterpTimer", getName().c_str()); if (inputG) { inputG->bilinearBackward(*outG, outImgH_, outImgW_, inImgH_, inImgW_, - numChannels_); + numChannels_, ratioH_, ratioW_); } } } diff --git a/paddle/gserver/layers/BilinearInterpLayer.h b/paddle/gserver/layers/BilinearInterpLayer.h index 33e0cb1220511..eba3c054fa8e7 100644 --- a/paddle/gserver/layers/BilinearInterpLayer.h +++ b/paddle/gserver/layers/BilinearInterpLayer.h @@ -29,6 +29,7 @@ class BilinearInterpLayer : public Layer { protected: size_t outImgH_, outImgW_; size_t inImgH_, inImgW_; + real ratioH_, ratioW_; size_t numChannels_; public: diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index c00190449984c..4d4e439dc6268 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -50,6 +50,16 @@ TEST(Layer, BilinearInterpLayer) { for (auto useGpu : {false, true}) { testLayerGrad(config, "bilinear_interp", 10, false, useGpu); } + + bilinear->set_img_size_x(32); + bilinear->set_img_size_y(32); + bilinear->set_out_size_x(32); + bilinear->set_out_size_y(32); + bilinear->set_num_channels(4); + + for (auto useGpu : {false, true}) { + testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + } } TEST(Operator, dot_mul) { diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 283733fe845d4..9abcbba67ab7e 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1227,7 +1227,9 @@ void GpuMatrix::bilinearForward(const Matrix& in, const size_t inImgW, const size_t outImgH, const size_t outImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { CHECK(dynamic_cast(&in)); const size_t outputW = getWidth(); @@ -1238,11 +1240,6 @@ void GpuMatrix::bilinearForward(const Matrix& in, real* outData = getData(); const real* inData = in.getData(); - real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - real ratioW = (outImgW > 1) ? - static_cast(inImgW - 1) / (outImgW - 1) : 0.f; - if (inImgH == outImgW && inImgW == outImgW) { this->copyFrom(in); } else { @@ -1258,7 +1255,9 @@ void GpuMatrix::bilinearBackward(const Matrix& out, const size_t outImgW, const size_t inImgH, const size_t inImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { CHECK(dynamic_cast(&out)); const size_t inputW = getWidth(); @@ -1269,13 +1268,8 @@ void GpuMatrix::bilinearBackward(const Matrix& out, real* inGrad = getData(); const real* outGrad = out.getData(); - real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - real ratioW = (outImgW > 1) ? 
- static_cast(inImgW - 1) / (outImgW - 1) : 0.f; - if (outImgH == inImgH && outImgW == inImgW) { - this->addBias(const_cast(out), 1.f); + this->add(const_cast(out)); } else { hl_bilinear_backward( inGrad, inImgH, inImgW, inputH, inputW, outGrad, @@ -3908,7 +3902,9 @@ void CpuMatrix::bilinearForward(const Matrix& in, const size_t inImgW, const size_t outImgH, const size_t outImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { CHECK(dynamic_cast(&in)); size_t outputW = getWidth(); @@ -3920,11 +3916,6 @@ void CpuMatrix::bilinearForward(const Matrix& in, real* outData = getData(); const real* inData = in.getData(); - const real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - const real ratioW = (outImgW > 1) ? - static_cast(inImgW - 1) / (outImgW - 1) : 0.f; - if (inImgH == outImgH && inImgW == outImgW) { this->copyFrom(in); } else { @@ -3932,21 +3923,23 @@ void CpuMatrix::bilinearForward(const Matrix& in, for (size_t i = 0; i < outImgH; ++i) { // loop for images size_t h = ratioH * i; size_t hid = (h < inImgH - 1) ? 1 : 0; - real hlambda = ratioH * i - h; + real h1lambda = ratioH * i - h; + real h2lambda = 1 - h1lambda; for (size_t j = 0; j < outImgW; ++j) { size_t w = ratioW * j; size_t wid = (w < inImgW - 1) ? 1 : 0; - real wlambda = ratioW * j - w; + real w1lambda = ratioW * j - w; + real w2lambda = 1 - w1lambda; // calculate four position for bilinear interpolation const real* inPos = &inData[k * inputW + h * inImgW + w]; real* outPos = &outData[k * outputW + i * outImgW + j]; for (size_t c = 0; c < numChannels; ++c) { // loop for channels // bilinear interpolation - outPos[0] = (1.f - hlambda) * - ((1.f - wlambda) * inPos[0] + wlambda * inPos[wid]) + - hlambda * ((1.f - wlambda) * inPos[hid * inImgW] + - wlambda * inPos[hid * inImgW + wid]); + outPos[0] = + h2lambda * (w2lambda * inPos[0] + w1lambda * inPos[wid]) + + h1lambda * (w2lambda * inPos[hid * inImgW] + + w1lambda * inPos[hid * inImgW + wid]); inPos += inImgH * inImgW; outPos += outImgH * outImgW; } @@ -3961,7 +3954,9 @@ void CpuMatrix::bilinearBackward(const Matrix& out, const size_t outImgW, const size_t inImgH, const size_t inImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { CHECK(dynamic_cast(&out)); size_t inputW = getWidth(); @@ -3973,32 +3968,28 @@ void CpuMatrix::bilinearBackward(const Matrix& out, real* inGrad = getData(); const real* outGrad = out.getData(); - const real ratioH = (outImgH > 1) ? - static_cast(inImgH - 1) / (outImgH - 1) : 0.f; - const real ratioW = (outImgW > 1) ? - static_cast(inImgW - 1) / (outImgW - 1) : 0.f; - if (inImgH == outImgH && inImgW == outImgW) { - this->addBias(const_cast(out), 1.f); + this->add(const_cast(out)); } else { for (size_t k = 0; k < batchSize; ++k) { // loop for batches for (size_t i = 0; i < outImgH; ++i) { // loop for images size_t h = ratioH * i; size_t hid = (h < inImgH - 1) ? 1 : 0; - real hlambda = ratioH * i - h; - + real h1lambda = ratioH * i - h; + real h2lambda = 1 - h1lambda; for (size_t j = 0; j < outImgW; ++j) { size_t w = ratioW * j; size_t wid = (w < inImgW - 1) ? 
1 : 0; - real wlambda = ratioW * j - w; + real w1lambda = ratioW * j - w; + real w2lambda = 1 - w1lambda; real* inPos = &inGrad[k * inputW + h * inImgW + w]; const real* outPos = &outGrad[k * outputW + i * outImgW + j]; for (size_t c = 0; c < numChannels; ++c) { // loop for channels - inPos[0] += (1.f - hlambda) * (1.f - wlambda) * outPos[0]; - inPos[wid] += (1.f - hlambda) * wlambda * outPos[0]; - inPos[hid * inImgW] += hlambda * (1.f - wlambda) * outPos[0]; - inPos[hid * inImgW + wid] += hlambda * wlambda * outPos[0]; + inPos[0] += h2lambda * w2lambda * outPos[0]; + inPos[wid] += h2lambda * w1lambda * outPos[0]; + inPos[hid * inImgW] += h1lambda * w2lambda * outPos[0]; + inPos[hid * inImgW + wid] += h1lambda * w1lambda * outPos[0]; inPos += inImgH * inImgW; outPos += outImgH * outImgW; } diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 25748a15696e1..07a2aebf556ef 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -997,7 +997,9 @@ class Matrix : public BaseMatrix { const size_t inImgW, const size_t outImgH, const size_t outImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { LOG(FATAL) << "Not implemented"; } virtual void bilinearBackward(const Matrix& out, @@ -1005,7 +1007,9 @@ class Matrix : public BaseMatrix { const size_t outImgW, const size_t inImgH, const size_t inImgW, - const size_t numChannels) { + const size_t numChannels, + const real ratioH, + const real ratioW) { LOG(FATAL) << "Not implemented"; } }; @@ -1283,14 +1287,18 @@ class GpuMatrix : public Matrix { const size_t inImgW, const size_t outImgH, const size_t outImgW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); void bilinearBackward(const Matrix& out, const size_t outImgH, const size_t outImgW, const size_t inImgH, const size_t inImgW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); }; class CpuMatrix : public Matrix { @@ -1583,14 +1591,18 @@ class CpuMatrix : public Matrix { const size_t inImgW, const size_t outImgH, const size_t outImgW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); void bilinearBackward(const Matrix& out, const size_t outImgH, const size_t outImgW, const size_t inImgH, const size_t inImgW, - const size_t numChannels); + const size_t numChannels, + const real ratioH, + const real ratioW); }; class SharedCpuMatrix : public CpuMatrix { diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index ef22e2aa8dd17..017fddc799591 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -94,7 +94,8 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, int channels) { int inWidth = imgSizeH * imgSizeW * channels; int outWidth = 2 * imgSizeH * 2 * imgSizeW * channels; - + real ratioH = 0.5; + real ratioW = 0.5; // forward MatrixPtr input = CpuMatrix::create(numSamples, inWidth, false, false); MatrixPtr inputGpu = GpuMatrix::create(numSamples, inWidth, false, true); @@ -107,9 +108,9 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, inputGpu->copyFrom(*input); target->bilinearForward(*input, imgSizeH, imgSizeW, - 2 * imgSizeH, 2 * imgSizeW, channels); + 2 * imgSizeH, 2 * imgSizeW, channels, ratioH, ratioW); targetGpu->bilinearForward(*inputGpu, imgSizeH, imgSizeW, - 2 * imgSizeH, 2 * imgSizeW, channels); + 2 * imgSizeH, 2 * imgSizeW, channels, ratioH, 
ratioW); // check targetCheck->copyFrom(*targetGpu); @@ -131,9 +132,9 @@ void testBilinearFwdBwd(int numSamples, int imgSizeH, int imgSizeW, targetGpuGrad->copyFrom(*targetGrad); inputGrad->bilinearBackward(*targetGrad, 2 * imgSizeH, 2 * imgSizeW, - imgSizeH, imgSizeW, channels); + imgSizeH, imgSizeW, channels, ratioH, ratioW); inputGpuGrad->bilinearBackward(*targetGpuGrad, 2 * imgSizeH, 2 * imgSizeW, - imgSizeH, imgSizeW, channels); + imgSizeH, imgSizeW, channels, ratioH, ratioW); // check targetCheckGrad->copyFrom(*inputGpuGrad); diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 038f4d32a588e..ccfdb3ded3ba1 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1272,19 +1272,17 @@ def bilinear_interp_layer(input, .. code-block:: python - bilinear = bilinear_interp_layer(input, - out_size_x, - out_size_y) + bilinear = bilinear_interp_layer(input=layer1, out_size_x=64, out_size_y=64) - :para input: A input layer. + :param input: A input layer. :type input: LayerOutput. - :para out_size_x: bilinear interpolation output width. + :param out_size_x: bilinear interpolation output width. :type out_size_x: int|None - :para out_size_y: bilinear interpolation output height. + :param out_size_y: bilinear interpolation output height. :type out_size_y: int|None - :para name: The layer's name, which cna not be specified. + :param name: The layer's name, which cna not be specified. :type name: None|basestring - :para layer_attr: Extra Layer attribute. + :param layer_attr: Extra Layer attribute. :type layer_attr: ExtraLayerAttribute :return: LayerOutput object. :rtype: LayerOutput @@ -1301,7 +1299,8 @@ def bilinear_interp_layer(input, num_channels=num_channels)), type=LayerType.BILINEAR_INTERP_LAYER, **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input]) + return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input], + num_filters=num_channels) @wrap_name_default() @layer_support() From 65af9f949532347c3ed47ba1fe47604d9cf3e7b7 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Tue, 8 Nov 2016 17:39:27 +0800 Subject: [PATCH 105/180] modify the format of diff information in protostr (#398) --- python/paddle/trainer_config_helpers/tests/configs/run_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh index f05fc46cd5520..9683288358426 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/run_tests.sh @@ -13,5 +13,5 @@ for file in $files do base_protostr=$protostr/$file new_protostr=$protostr/$file.unitest - diff $base_protostr $new_protostr + diff $base_protostr $new_protostr -u done From 7f6e9acab05ff407371cec41661de84afd8d0f1d Mon Sep 17 00:00:00 2001 From: liaogang Date: Tue, 8 Nov 2016 17:41:58 +0800 Subject: [PATCH 106/180] Fix minior bug * add patch does not trigger travis ci --- .travis.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 74aa767febeb1..ffe3bc193b49e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,13 +42,11 @@ addons: before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - TRAVIS_COMMIT_RANGE="FETCH_HEAD...$TRAVIS_BRANCH" - fi - git diff 
--name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' || { + if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' + then echo "Only markdown docs were updated, stopping build process." exit - } + fi fi - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi From e2c071358914fdead0a86348fd91c8227e78f280 Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 8 Nov 2016 10:12:04 +0000 Subject: [PATCH 107/180] follow comments --- paddle/cuda/include/hl_cnn.h | 8 +- paddle/gserver/layers/PoolProjection.cpp | 22 ++-- paddle/gserver/layers/PoolProjection.h | 1 + .../layers/SpatialPyramidPoolLayer.cpp | 14 ++- .../paddle/trainer_config_helpers/layers.py | 112 +++++++++--------- .../tests/configs/generate_protostr.sh | 2 - 6 files changed, 81 insertions(+), 78 deletions(-) diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/cuda/include/hl_cnn.h index 4bd9d5e7c9e90..de6c2fb8f281a 100644 --- a/paddle/cuda/include/hl_cnn.h +++ b/paddle/cuda/include/hl_cnn.h @@ -91,7 +91,7 @@ extern void hl_expand_feature2col( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] tgtData output data. - * @param[in] tgtStride output data stride. + * @param[in] tgtStride stride between output data samples. * */ extern void hl_maxpool_forward( @@ -125,7 +125,7 @@ extern void hl_maxpool_forward( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] targetGrad output grad. - * @param[in] outStride output grad data stride. + * @param[in] outStride stride between output data samples. * */ extern void hl_maxpool_backward( @@ -157,7 +157,7 @@ extern void hl_maxpool_backward( * @param[in] paddingH padding height. * @param[in] paddingW padding width. * @param[out] tgtData output data. - * @param[in] tgtStride output data stride. + * @param[in] tgtStride stride between output data samples. * */ extern void hl_avgpool_forward( @@ -189,7 +189,7 @@ extern void hl_avgpool_forward( * @param[in] scaleA scale. * @param[in] scaleB scale. * @param[out] backGrad output grad. - * @param[in] outStride output grad data stride. + * @param[in] outStride stride between output data samples. 
* */ extern void hl_avgpool_backward( diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index e10788e926470..8c7d027c07250 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -34,9 +34,9 @@ PoolProjection* PoolProjection::create(const ProjectionConfig& config, void MaxPoolProjection::forward() { MatrixPtr inputV = in_->value; MatrixPtr outV = out_->value; - outV->maxPoolForward(*inputV, imgSizeY_, imgSize_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputY_, outputX_, confPaddingY_, confPadding_); + outV->maxPoolForward(*inputV, imgSizeY_, imgSize_, channels_, sizeX_, sizeY_, + strideY_, stride_, outputY_, outputX_, confPaddingY_, + confPadding_); } void MaxPoolProjection::backward(const UpdateCallback& callback) { @@ -50,17 +50,16 @@ void MaxPoolProjection::backward(const UpdateCallback& callback) { return; } inputGrad->maxPoolBackward(*inputV, imgSizeY_, imgSize_, *outGrad, *outV, - sizeX_, sizeY_, - strideY_, stride_, outputY_, outputX_, 1, 1, - confPaddingY_, confPadding_); + sizeX_, sizeY_, strideY_, stride_, outputY_, + outputX_, 1, 1, confPaddingY_, confPadding_); } void AvgPoolProjection::forward() { MatrixPtr inputV = in_->value; MatrixPtr outV = out_->value; - outV->avgPoolForward(*inputV, imgSizeY_, imgSize_, channels_, - sizeX_, sizeY_, strideY_, stride_, - outputY_, outputX_, confPaddingY_, confPadding_); + outV->avgPoolForward(*inputV, imgSizeY_, imgSize_, channels_, sizeX_, sizeY_, + strideY_, stride_, outputY_, outputX_, confPaddingY_, + confPadding_); } void AvgPoolProjection::backward(const UpdateCallback& callback) { @@ -73,9 +72,8 @@ void AvgPoolProjection::backward(const UpdateCallback& callback) { return; } - inputGrad->avgPoolBackward(*outputGrad, imgSizeY_, imgSize_, - sizeX_, sizeY_, strideY_, stride_, - outputY_, outputX_, 1, 1, + inputGrad->avgPoolBackward(*outputGrad, imgSizeY_, imgSize_, sizeX_, sizeY_, + strideY_, stride_, outputY_, outputX_, 1, 1, confPaddingY_, confPadding_); } } // namespace paddle diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h index 73d8a41aefabe..9fa16c1ea64c5 100644 --- a/paddle/gserver/layers/PoolProjection.h +++ b/paddle/gserver/layers/PoolProjection.h @@ -15,6 +15,7 @@ limitations under the License. */ #pragma once #include "Projection.h" +#include "paddle/math/MathUtils.h" namespace paddle { diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp index 7ec761364043b..846e2e0666030 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp @@ -56,8 +56,15 @@ ProjectionConfig SpatialPyramidPoolLayer::getConfig(size_t imgSizeW, size_t SpatialPyramidPoolLayer::getSize() { CHECK_EQ(inputLayers_.size(), 1UL); size_t layerSize = 0; + const SppConfig& sppConf = config_.inputs(0).spp_conf(); imgSizeH_ = inputLayers_[0]->getOutput().getFrameHeight(); imgSizeW_ = inputLayers_[0]->getOutput().getFrameWidth(); + if (imgSizeH_ == 0) { + imgSizeH_ = sppConf.has_img_size_y() ? 
sppConf.img_size_y() : imgSizeW_; + } + if (imgSizeW_ == 0) { + imgSizeW_ = sppConf.img_size(); + } size_t outputH = 1; size_t outputW = (std::pow(4, pyramidHeight_) - 1) / (4 - 1); @@ -66,10 +73,10 @@ size_t SpatialPyramidPoolLayer::getSize() { getOutput().setFrameHeight(outputH); getOutput().setFrameWidth(outputW); + return layerSize; } - bool SpatialPyramidPoolLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { Layer::init(layerMap, parameterMap); @@ -90,8 +97,8 @@ bool SpatialPyramidPoolLayer::init(const LayerMap& layerMap, size_t endCol = 0; for (size_t i = 0; i < pyramidHeight_; i++) { poolProjections_.emplace_back(PoolProjection::create( - getConfig(imgSizeW_, imgSizeH_, channels_, i, poolType_), - nullptr, useGpu_)); + getConfig(imgSizeW_, imgSizeH_, channels_, i, poolType_), nullptr, + useGpu_)); endCol += poolProjections_[i]->getOutputSize(); projCol_.push_back(std::make_pair(startCol, endCol)); startCol = endCol; @@ -125,4 +132,3 @@ void SpatialPyramidPoolLayer::backward(const UpdateCallback& callback) { } } // namespace paddle - diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index e6338e804536a..1459c9a84a56f 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -56,7 +56,7 @@ 'rank_cost', 'lambda_cost', 'huber_cost', 'block_expand_layer', 'maxout_layer', 'out_prod_layer', 'print_layer', - # 'spp_layer', + 'spp_layer', ] @@ -112,7 +112,7 @@ class LayerType(object): LINEAR_COMBINATION_LAYER = "convex_comb" BLOCK_EXPAND = "blockexpand" MAXOUT = "maxout" - # SPP_LAYER = "spp" + SPP_LAYER = "spp" PRINT_LAYER = "print" @@ -1711,60 +1711,60 @@ def img_pool_layer(input, pool_size, name=None, num_filters=num_channels) -# @wrap_name_default("spp") -# @layer_support() -# def spp_layer(input, name=None, num_channels=None, pool_type=None, -# pyramid_height=None, img_width=None, layer_attr=None): -# pass -# """ -# Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. -# The details please refer to -# `Kaiming He's paper `_. - -# :param name: layer name. -# :type name: basestring -# :param input: layer's input. -# :type input: LayerOutput -# :param num_channels: number of input channel. -# :type num_channels: int -# :param pool_type: Pooling type. MaxPooling or AveragePooling. Default is MaxPooling. -# :type scale: BasePoolingType -# :param pyramid_height: pyramid height. -# :type pyramid_height: int -# :param img_width: the width of input feature map. If it is None, the input feature -# map should be square. -# :type img_width: int|None -# :param layer_attr: Extra Layer Attribute. -# :type layer_attr: ExtraLayerAttribute -# :return: LayerOutput object. 
-# :rtype: LayerOutput -# """ -# if num_channels is None: -# assert input.num_filters is not None -# num_channels = input.num_filters - -# if pool_type is None: -# pool_type = MaxPooling() -# elif isinstance(pool_type, AvgPooling): -# pool_type.name = 'avg' - -# type_name = pool_type.name -# if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)): -# type_name += '-projection' - -# Layer( -# name=name, -# type=LayerType.SPP_LAYER, -# inputs=Input(input.name, -# spp=SpatialPyramidPool(pool_type=type_name, -# channels=num_channels, -# pyramid_height=pyramid_height, -# img_width=img_width) -# ), -# **ExtraLayerAttribute.to_kwargs(layer_attr) -# ) -# return LayerOutput(name, LayerType.SPP_LAYER, parents=[input], -# num_filters=num_channels) +@wrap_name_default("spp") +@layer_support() +def spp_layer(input, name=None, num_channels=None, pool_type=None, + pyramid_height=None, img_width=None, layer_attr=None): + pass + """ + Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. + The details please refer to + `Kaiming He's paper `_. + + :param name: layer name. + :type name: basestring + :param input: layer's input. + :type input: LayerOutput + :param num_channels: number of input channel. + :type num_channels: int + :param pool_type: Pooling type. MaxPooling or AveragePooling. Default is MaxPooling. + :type scale: BasePoolingType + :param pyramid_height: pyramid height. + :type pyramid_height: int + :param img_width: the width of input feature map. If it is None, the input feature + map should be square. + :type img_width: int|None + :param layer_attr: Extra Layer Attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. + :rtype: LayerOutput + """ + if num_channels is None: + assert input.num_filters is not None + num_channels = input.num_filters + + if pool_type is None: + pool_type = MaxPooling() + elif isinstance(pool_type, AvgPooling): + pool_type.name = 'avg' + + type_name = pool_type.name + if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)): + type_name += '-projection' + + Layer( + name=name, + type=LayerType.SPP_LAYER, + inputs=Input(input.name, + spp=SpatialPyramidPool(pool_type=type_name, + channels=num_channels, + pyramid_height=pyramid_height, + img_width=img_width) + ), + **ExtraLayerAttribute.to_kwargs(layer_attr) + ) + return LayerOutput(name, LayerType.SPP_LAYER, parents=[input], + num_filters=num_channels) def __img_norm_layer__(name, input, size, norm_type, scale, power, diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 7440181970af1..e1686742b778f 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -11,11 +11,9 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -# test_maxout test_bi_grumemory math_ops test_spp_layer) test_maxout test_bi_grumemory math_ops test_spp_layer) - for conf in ${configs[*]} do echo "Generating " $conf From 0c7ac3d9c2062572967f3a34bee68aba9fc82409 Mon Sep 17 00:00:00 2001 From: gangliao Date: Tue, 8 Nov 2016 03:00:42 -0800 Subject: [PATCH 108/180] Fix Travis Ci does not build when push patches (#399) --- .travis.yml | 8 
+++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index 74aa767febeb1..ffe3bc193b49e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -42,13 +42,11 @@ addons: before_install: - | if [ ${JOB} == "BUILD_AND_TEST" ]; then - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then - TRAVIS_COMMIT_RANGE="FETCH_HEAD...$TRAVIS_BRANCH" - fi - git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' || { + if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(\.md$)' + then echo "Only markdown docs were updated, stopping build process." exit - } + fi fi - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo paddle/scripts/travis/before_install.linux.sh; fi - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then paddle/scripts/travis/before_install.osx.sh; fi From 70e04683dddd4ae47f1947f95008413ce1e69fce Mon Sep 17 00:00:00 2001 From: qijun Date: Tue, 8 Nov 2016 13:26:41 +0000 Subject: [PATCH 109/180] add getSize method for PoolProjection --- paddle/gserver/layers/PoolProjection.cpp | 4 ++++ paddle/gserver/layers/PoolProjection.h | 20 +++++++++++++++++++ paddle/gserver/layers/PoolProjectionLayer.cpp | 2 -- .../layers/SpatialPyramidPoolLayer.cpp | 4 ---- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index 8c7d027c07250..a4fb001ffa0e8 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -32,6 +32,8 @@ PoolProjection* PoolProjection::create(const ProjectionConfig& config, } void MaxPoolProjection::forward() { + size_t width = getSize(); + CHECK_EQ(width, out_->value->getWidth()); MatrixPtr inputV = in_->value; MatrixPtr outV = out_->value; outV->maxPoolForward(*inputV, imgSizeY_, imgSize_, channels_, sizeX_, sizeY_, @@ -55,6 +57,8 @@ void MaxPoolProjection::backward(const UpdateCallback& callback) { } void AvgPoolProjection::forward() { + size_t width = getSize(); + CHECK_EQ(width, out_->value->getWidth()); MatrixPtr inputV = in_->value; MatrixPtr outV = out_->value; outV->avgPoolForward(*inputV, imgSizeY_, imgSize_, channels_, sizeX_, sizeY_, diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h index 9fa16c1ea64c5..04d592f51fb59 100644 --- a/paddle/gserver/layers/PoolProjection.h +++ b/paddle/gserver/layers/PoolProjection.h @@ -51,6 +51,26 @@ class PoolProjection : public Projection { static PoolProjection* create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); const std::string& getPoolType() const { return poolType_; } + size_t getSize() { + imgSizeY_ = in_->getFrameHeight(); + imgSize_ = in_->getFrameWidth(); + const PoolConfig& conf = config_.pool_conf(); + if (imgSizeY_ == 0) { + imgSizeY_ = conf.has_img_size_y() ? 
conf.img_size_y() : conf.img_size(); + } + if (imgSize_ == 0) { + imgSize_ = conf.img_size(); + } + outputY_ = outputSize(imgSizeY_, sizeY_, confPaddingY_, strideY_, + /* caffeMode */ false); + outputX_ = outputSize(imgSize_, sizeX_, confPadding_, stride_, + /* caffeMode */ false); + + const_cast(out_)->setFrameHeight(outputY_); + const_cast(out_)->setFrameWidth(outputX_); + + return outputY_ * outputX_ * channels_; + } }; class MaxPoolProjection : public PoolProjection { diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/gserver/layers/PoolProjectionLayer.cpp index fbef55e4d49a0..cabb346d6c991 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/gserver/layers/PoolProjectionLayer.cpp @@ -38,8 +38,6 @@ size_t PoolProjectionLayer::getSize() { layerSize = outputH_ * outputW_ * channels_; - getOutput().setFrameHeight(outputH_); - getOutput().setFrameWidth(outputW_); return layerSize; } diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp index 846e2e0666030..582abf78c84a4 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp @@ -70,10 +70,6 @@ size_t SpatialPyramidPoolLayer::getSize() { size_t outputW = (std::pow(4, pyramidHeight_) - 1) / (4 - 1); layerSize = outputH * outputW * channels_; - - getOutput().setFrameHeight(outputH); - getOutput().setFrameWidth(outputW); - return layerSize; } From 8c40bfd0fc37fe193f5d1c57a55a389f3871cfa6 Mon Sep 17 00:00:00 2001 From: Liang Zhao Date: Mon, 7 Nov 2016 14:26:28 -0800 Subject: [PATCH 110/180] Make matrix well-conditioned when unittest inverse --- paddle/math/Matrix.cpp | 21 +++++++++++++++++++++ paddle/math/Matrix.h | 6 ++++++ paddle/math/tests/test_matrixCompare.cpp | 13 ++++++++----- 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 4fc9b2d089366..f0f5ebe3bd05c 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -202,6 +202,17 @@ void GpuMatrix::resetOne() { CHECK(data_ != NULL); one(); } + +void GpuMatrix::setDiag(real value) { + CHECK(data_ != NULL); + CHECK_EQ(height_, width_); + + zeroMem(); + for (size_t i = 0; i < height_; i++) { + hl_memcpy_host2device(&data_[i * stride_ + i], &value, sizeof(real)); + } +} + void GpuMatrix::resize(size_t newHeight, size_t newWidth) { size_t newSize = newHeight * newWidth; if (NULL == memoryHandle_.get() || @@ -1244,6 +1255,16 @@ void CpuMatrix::resetOne() { BaseMatrix::one(); } +void CpuMatrix::setDiag(real value) { + CHECK(data_ != NULL); + CHECK_EQ(height_, width_); + + zeroMem(); + for (size_t i = 0; i < height_; i++) { + data_[i * stride_ + i] = value; + } +} + void CpuMatrix::copyFrom(const Matrix& src) { CHECK(isContiguous()); if (typeid(src) == typeid(GpuMatrix)) { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 293d13f4d6d5a..9e15055c056a1 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -195,6 +195,8 @@ class Matrix : public BaseMatrix { virtual void resetOne() { LOG(FATAL) << "Not implemented"; } + virtual void setDiag(real value) { LOG(FATAL) << "Not implemented"; } + virtual void copyFrom(const Matrix& src) { LOG(FATAL) << "Not implemented"; } virtual void trimFrom(const CpuSparseMatrix& src) { @@ -330,6 +332,7 @@ class Matrix : public BaseMatrix { virtual MatrixPtr getInverse() { LOG(FATAL) << "Not implemented"; + return nullptr; } /** @@ -1016,6 +1019,7 @@ class GpuMatrix : public Matrix { void zeroMem(); void resetOne(); 
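+  // Zero the whole matrix, then set every entry on the main diagonal to
+  // `value` (i.e. value * Identity for this square matrix).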
+ void setDiag(real value); void resize(size_t newHeight, size_t newWidth); void resize(size_t newHeight, size_t newWidth, @@ -1280,6 +1284,8 @@ class CpuMatrix : public Matrix { void zeroMem(); void resetOne(); + void setDiag(real value); + void resize(size_t newHeight, size_t newWidth); void resize(size_t newHeight, size_t newWidth, size_t newNnz, /* used to allocate space */ diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index b887cccaaa14e..91a68006325f6 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -647,20 +647,23 @@ void testMatrixInverse(int height) { MatrixPtr cpuI = std::make_shared(height, height); MatrixPtr gpuI = std::make_shared(height, height); + /* Make matrix well conditioned: cpu * cpuT + Identity */ cpu->randomizeUniform(); + MatrixPtr cpuT = cpu->getTranspose(); + MatrixPtr outputCheck = std::make_shared(height, height); + outputCheck->mul(cpu, cpuT); + cpu->setDiag(1.0); + cpu->add(*outputCheck); + gpu->copyFrom(*cpu); cpu->inverse(cpuI, false); gpu->inverse(gpuI, false); - MatrixPtr outputCheck = std::make_shared(height, height); outputCheck->copyFrom(*gpuI); MatrixCheckErr(*cpuI, *outputCheck); outputCheck->mul(cpu, cpuI); - cpu->zeroMem(); - for (int i = 0; i < height; i++) { - cpu->getRowBuf(i)[i] = 1.0; - } + cpu->setDiag(1.0); MatrixCheckErr(*cpu, *outputCheck); } From 992ac8f9a1c54080bc273f7748510e2b85c7f8cc Mon Sep 17 00:00:00 2001 From: Liang Zhao Date: Tue, 8 Nov 2016 10:36:22 -0800 Subject: [PATCH 111/180] Implement setDiag() with BaseMatrix::assign() --- paddle/math/Matrix.cpp | 29 +++++++++-------------------- paddle/math/Matrix.h | 2 +- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index f0f5ebe3bd05c..a5b0d959536fa 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -187,6 +187,15 @@ MatrixPtr Matrix::subMatrix(size_t startRow, size_t endRow, size_t startCol, trans_, useGpu_); } +void Matrix::setDiag(real value) { + CHECK(data_ != NULL); + CHECK_EQ(height_, width_); + + zeroMem(); + BaseMatrix diag(height_, 1, stride_ + 1, data_, false, useGpu_); + diag.assign(value); +} + GpuMatrix::GpuMatrix(size_t height, size_t width, bool trans) : Matrix(std::make_shared(height * width * sizeof(real)), height, width, trans, true) {} @@ -203,16 +212,6 @@ void GpuMatrix::resetOne() { one(); } -void GpuMatrix::setDiag(real value) { - CHECK(data_ != NULL); - CHECK_EQ(height_, width_); - - zeroMem(); - for (size_t i = 0; i < height_; i++) { - hl_memcpy_host2device(&data_[i * stride_ + i], &value, sizeof(real)); - } -} - void GpuMatrix::resize(size_t newHeight, size_t newWidth) { size_t newSize = newHeight * newWidth; if (NULL == memoryHandle_.get() || @@ -1255,16 +1254,6 @@ void CpuMatrix::resetOne() { BaseMatrix::one(); } -void CpuMatrix::setDiag(real value) { - CHECK(data_ != NULL); - CHECK_EQ(height_, width_); - - zeroMem(); - for (size_t i = 0; i < height_; i++) { - data_[i * stride_ + i] = value; - } -} - void CpuMatrix::copyFrom(const Matrix& src) { CHECK(isContiguous()); if (typeid(src) == typeid(GpuMatrix)) { diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 9e15055c056a1..120957f45d0c9 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -195,7 +195,7 @@ class Matrix : public BaseMatrix { virtual void resetOne() { LOG(FATAL) << "Not implemented"; } - virtual void setDiag(real value) { LOG(FATAL) << "Not implemented"; } + void setDiag(real value); 
virtual void copyFrom(const Matrix& src) { LOG(FATAL) << "Not implemented"; } From bc2b521c241f903b56596b9a52d50c6f59bc9002 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 12:07:19 +0800 Subject: [PATCH 112/180] Follow comments --- paddle/gserver/tests/test_LayerGrad.cpp | 18 +++++------------- paddle/math/Matrix.cpp | 12 ++++++++---- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 0c18611f01090..d8c45040d86bc 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -43,22 +43,14 @@ TEST(Layer, BilinearInterpLayer) { bilinear->set_img_size_x(32); bilinear->set_img_size_y(32); - bilinear->set_out_size_x(64); - bilinear->set_out_size_y(64); bilinear->set_num_channels(4); for (auto useGpu : {false, true}) { - testLayerGrad(config, "bilinear_interp", 10, false, useGpu); - } - - bilinear->set_img_size_x(32); - bilinear->set_img_size_y(32); - bilinear->set_out_size_x(32); - bilinear->set_out_size_y(32); - bilinear->set_num_channels(4); - - for (auto useGpu : {false, true}) { - testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + for (auto out_size : {32, 64, 128}) { + bilinear->set_out_size_x(out_size); + bilinear->set_out_size_y(out_size); + testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + } } } diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 4770a7203498d..80bf74bd4c51a 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -3902,6 +3902,8 @@ void CpuMatrix::bilinearForward(const Matrix& in, size_t batchSize = getHeight(); size_t inputW = in.getWidth(); size_t inputH = in.getHeight(); + size_t inPosOffset = inImgH * inImgW; + size_t outPosOffset = outImgH * outImgW; (void)(inputH); real* outData = getData(); @@ -3931,8 +3933,8 @@ void CpuMatrix::bilinearForward(const Matrix& in, h2lambda * (w2lambda * inPos[0] + w1lambda * inPos[wid]) + h1lambda * (w2lambda * inPos[hid * inImgW] + w1lambda * inPos[hid * inImgW + wid]); - inPos += inImgH * inImgW; - outPos += outImgH * outImgW; + inPos += inPosOffset; + outPos += outPosOffset; } } } @@ -3954,6 +3956,8 @@ void CpuMatrix::bilinearBackward(const Matrix& out, size_t inputH = getHeight(); size_t outputW = out.getWidth(); size_t batchSize = out.getHeight(); + size_t inPosOffset = inImgH * inImgW; + size_t outPosOffset = outImgH * outImgW; (void)(inputH); real* inGrad = getData(); @@ -3981,8 +3985,8 @@ void CpuMatrix::bilinearBackward(const Matrix& out, inPos[wid] += h2lambda * w1lambda * outPos[0]; inPos[hid * inImgW] += h1lambda * w2lambda * outPos[0]; inPos[hid * inImgW + wid] += h1lambda * w1lambda * outPos[0]; - inPos += inImgH * inImgW; - outPos += outImgH * outImgW; + inPos += inPosOffset; + outPos += outPosOffset; } } } From eaf3dec9c57a9028843767c339009c0882ba38e8 Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 9 Nov 2016 05:48:35 +0000 Subject: [PATCH 113/180] follow comments --- paddle/gserver/layers/PoolProjection.cpp | 40 +++++++++++++++++++ paddle/gserver/layers/PoolProjection.h | 40 +++---------------- paddle/gserver/layers/PoolProjectionLayer.h | 18 ++++----- .../gserver/layers/SpatialPyramidPoolLayer.h | 11 ++++- .../configs/protostr/test_spp_layer.protostr | 34 ++++++++++++++++ .../tests/configs/test_spp_layer.py | 2 +- 6 files changed, 98 insertions(+), 47 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_spp_layer.protostr diff --git 
a/paddle/gserver/layers/PoolProjection.cpp b/paddle/gserver/layers/PoolProjection.cpp index a4fb001ffa0e8..9be5aba3d57d2 100644 --- a/paddle/gserver/layers/PoolProjection.cpp +++ b/paddle/gserver/layers/PoolProjection.cpp @@ -18,6 +18,46 @@ namespace paddle { REGISTER_PROJECTION_CREATE_FUNC(pool, &PoolProjection::create); +PoolProjection::PoolProjection(const ProjectionConfig& config, + ParameterPtr parameter, bool useGpu) + : Projection(config, parameter, useGpu) { + const PoolConfig& conf = config_.pool_conf(); + poolType_ = conf.pool_type(); + channels_ = conf.channels(); + sizeX_ = conf.size_x(); + stride_ = conf.stride(); + outputX_ = conf.output_x(); + imgSize_ = conf.img_size(); + confPadding_ = conf.padding(); + + sizeY_ = conf.has_size_y() ? conf.size_y() : conf.size_x(); + imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride(); + confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); + outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); +} + +size_t PoolProjection::getSize() { + imgSizeY_ = in_->getFrameHeight(); + imgSize_ = in_->getFrameWidth(); + const PoolConfig& conf = config_.pool_conf(); + if (imgSizeY_ == 0) { + imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); + } + if (imgSize_ == 0) { + imgSize_ = conf.img_size(); + } + outputY_ = outputSize(imgSizeY_, sizeY_, confPaddingY_, strideY_, + /* caffeMode */ false); + outputX_ = outputSize(imgSize_, sizeX_, confPadding_, stride_, + /* caffeMode */ false); + + const_cast(out_)->setFrameHeight(outputY_); + const_cast(out_)->setFrameWidth(outputX_); + + return outputY_ * outputX_ * channels_; +} + PoolProjection* PoolProjection::create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) { const std::string& pool = config.pool_conf().pool_type(); diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/gserver/layers/PoolProjection.h index 04d592f51fb59..a11e25b729cb7 100644 --- a/paddle/gserver/layers/PoolProjection.h +++ b/paddle/gserver/layers/PoolProjection.h @@ -31,46 +31,14 @@ class PoolProjection : public Projection { public: PoolProjection(const ProjectionConfig& config, ParameterPtr parameter, - bool useGpu) - : Projection(config, parameter, useGpu) { - const PoolConfig& conf = config_.pool_conf(); - poolType_ = conf.pool_type(); - channels_ = conf.channels(); - sizeX_ = conf.size_x(); - stride_ = conf.stride(); - outputX_ = conf.output_x(); - imgSize_ = conf.img_size(); - confPadding_ = conf.padding(); + bool useGpu); - sizeY_ = conf.has_size_y() ? conf.size_y() : conf.size_x(); - imgSizeY_ = conf.has_img_size_y() ? conf.img_size_y() : conf.img_size(); - strideY_ = conf.has_stride_y() ? conf.stride_y() : conf.stride(); - confPaddingY_ = conf.has_padding_y() ? conf.padding_y() : conf.padding(); - outputY_ = conf.has_output_y() ? conf.output_y() : conf.output_x(); - } static PoolProjection* create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); - const std::string& getPoolType() const { return poolType_; } - size_t getSize() { - imgSizeY_ = in_->getFrameHeight(); - imgSize_ = in_->getFrameWidth(); - const PoolConfig& conf = config_.pool_conf(); - if (imgSizeY_ == 0) { - imgSizeY_ = conf.has_img_size_y() ? 
conf.img_size_y() : conf.img_size(); - } - if (imgSize_ == 0) { - imgSize_ = conf.img_size(); - } - outputY_ = outputSize(imgSizeY_, sizeY_, confPaddingY_, strideY_, - /* caffeMode */ false); - outputX_ = outputSize(imgSize_, sizeX_, confPadding_, stride_, - /* caffeMode */ false); - const_cast(out_)->setFrameHeight(outputY_); - const_cast(out_)->setFrameWidth(outputX_); + const std::string& getPoolType() const { return poolType_; } - return outputY_ * outputX_ * channels_; - } + size_t getSize(); }; class MaxPoolProjection : public PoolProjection { @@ -78,6 +46,7 @@ class MaxPoolProjection : public PoolProjection { MaxPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) : PoolProjection(config, parameter, useGpu) {} + virtual void forward(); virtual void backward(const UpdateCallback& callback = nullptr); }; @@ -87,6 +56,7 @@ class AvgPoolProjection : public PoolProjection { AvgPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) : PoolProjection(config, parameter, useGpu) {} + virtual void forward(); virtual void backward(const UpdateCallback& callback = nullptr); }; diff --git a/paddle/gserver/layers/PoolProjectionLayer.h b/paddle/gserver/layers/PoolProjectionLayer.h index 6e336f79e9043..777b6f39e7cc4 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.h +++ b/paddle/gserver/layers/PoolProjectionLayer.h @@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once +#include #include "PoolLayer.h" #include "PoolProjection.h" #include "paddle/math/Matrix.h" -#include namespace paddle { /** @@ -32,15 +31,16 @@ class PoolProjectionLayer : public PoolLayer { ProjectionConfig projectionConfig_; public: - size_t getSize(); - virtual void forward(PassType passType); - virtual void backward(const UpdateCallback& callback = nullptr); - explicit PoolProjectionLayer(const LayerConfig& config) - : PoolLayer(config) { + explicit PoolProjectionLayer(const LayerConfig& config) : PoolLayer(config) { PoolConfig* conf = projectionConfig_.mutable_pool_conf(); *conf = config_.inputs(0).pool_conf(); - poolProjection_.reset(PoolProjection::create(projectionConfig_, nullptr, - useGpu_)); + poolProjection_.reset( + PoolProjection::create(projectionConfig_, nullptr, useGpu_)); } + + size_t getSize(); + + virtual void forward(PassType passType); + virtual void backward(const UpdateCallback& callback = nullptr); }; } // namespace paddle diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/gserver/layers/SpatialPyramidPoolLayer.h index 64f3fda8a0adf..8416a717d654e 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.h +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.h @@ -12,15 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - #pragma once #include "Layer.h" #include "PoolProjection.h" -#include "paddle/utils/Logging.h" #include "paddle/math/MathUtils.h" +#include "paddle/utils/Logging.h" namespace paddle { +/** + * @brief A layer for spatial pyramid pooling on the input image by taking + * the max, average, etc. within regions, so that the result vector of + * different sized images are of the same size. 
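+ *
+ * For example, with pyramid_height = 2 the pool projections cover a 1x1
+ * grid and a 2x2 grid, so each channel contributes 1 + 4 = 5 values and
+ * the output size is channels * 5 (e.g. 16 channels -> 80), independent
+ * of the input image size.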
+ */ class SpatialPyramidPoolLayer : public Layer { protected: @@ -36,12 +40,15 @@ class SpatialPyramidPoolLayer : public Layer { public: explicit SpatialPyramidPoolLayer(const LayerConfig& config) : Layer(config) {} + ~SpatialPyramidPoolLayer() {} virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + ProjectionConfig getConfig(size_t sizeX_, size_t sizeY_, size_t channels, size_t pyamidLevel_, std::string& poolType_); size_t getSize(); + virtual void forward(PassType passType); virtual void backward(const UpdateCallback& callback = nullptr); }; diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_spp_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_spp_layer.protostr new file mode 100644 index 0000000000000..8b0a8f2146b70 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_spp_layer.protostr @@ -0,0 +1,34 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 3200 + active_type: "" +} +layers { + name: "__spp_0__" + type: "spp" + size: 80 + active_type: "" + inputs { + input_layer_name: "data" + spp_conf { + pool_type: "max-projection" + pyramid_height: 2 + channels: 16 + img_size: 10 + img_size_y: 20 + } + } +} +input_layer_names: "data" +output_layer_names: "__spp_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__spp_0__" + input_layer_names: "data" + output_layer_names: "__spp_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py index 2cbc76ce20b8a..178387d3cf1d1 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_spp_layer.py @@ -1,7 +1,7 @@ from paddle.trainer_config_helpers import * settings( - batch_size=100, + batch_size=100, learning_rate=1e-5 ) From 14ba68f963d2a87bcc78c06f1835b40ff7bf3135 Mon Sep 17 00:00:00 2001 From: gangliao Date: Wed, 9 Nov 2016 14:05:05 +0800 Subject: [PATCH 114/180] Update FindAVX.cmake (#404) * make AVX_FOUND is default value to WITH AVX * let AVX_FLAG always keep -mavx flag since compiler can build binary with -mavx even CPU does not support avx. 
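The distinction matters because CHECK_CXX_SOURCE_RUNS compiles and executes each test snippet, so AVX_FOUND/AVX2_FOUND describe what the build machine's CPU can actually run, whereas -mavx/-mavx2 only tell the compiler which instructions it may emit. A minimal, Linux-only sketch of the runtime half (not part of this patch; it assumes /proc/cpuinfo is available, as it is on typical Linux build machines):

```python
# Read the CPU feature flags the kernel reports; this is the same runtime
# capability that CHECK_CXX_SOURCE_RUNS probes by actually running the
# test program, and that compiler flags alone cannot guarantee.
def cpu_flags(path='/proc/cpuinfo'):
    """Collect the CPU feature flags reported by the kernel."""
    flags = set()
    with open(path) as f:
        for line in f:
            if line.startswith('flags'):
                flags.update(line.split(':', 1)[1].split())
    return flags

if __name__ == '__main__':
    supported = cpu_flags()
    for isa in ('avx', 'avx2'):
        print('%s: %s' % (isa, 'supported' if isa in supported else 'not supported'))
```

On a machine (for example a virtual machine) whose CPU does not expose AVX, the build can still succeed with -mavx, but the resulting binary would fault at runtime; that is why the feature test has to run, not merely compile.
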
--- CMakeLists.txt | 14 +++---- cmake/FindAVX.cmake | 93 +++++++++++++++++++++++++-------------------- 2 files changed, 58 insertions(+), 49 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 39f876bc9ee4b..d7e7e49e9a038 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -109,11 +109,9 @@ else() set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-g -O3 --use_fast_math") if(WITH_AVX) - if(AVX_FOUND) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler -mavx") - endif(AVX_FOUND) + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${AVX_FLAG}") else(WITH_AVX) - set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler -msse3") + set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS} "-Xcompiler ${SSE3_FLAG}") endif(WITH_AVX) if(WITH_DSO) @@ -138,11 +136,11 @@ if(NOT WITH_TIMER) endif(NOT WITH_TIMER) if(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAGS}") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAGS}") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${AVX_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${AVX_FLAG}") else(WITH_AVX) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -msse3") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse3") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SSE3_FLAG}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SSE3_FLAG}") endif(WITH_AVX) if(WITH_PYTHON) diff --git a/cmake/FindAVX.cmake b/cmake/FindAVX.cmake index f6103c6e667e8..d380c996dfa95 100644 --- a/cmake/FindAVX.cmake +++ b/cmake/FindAVX.cmake @@ -3,36 +3,55 @@ INCLUDE(CheckCXXSourceRuns) -SET(FIND_AVX_10) -SET(FIND_AVX_20) -SET(AVX_FLAGS) -SET(AVX_FOUND) - -# Check AVX 2 -SET(CMAKE_REQUIRED_FLAGS) IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - SET(CMAKE_REQUIRED_FLAGS "-mavx2") -ELSEIF(MSVC AND NOT CMAKE_CL_64) # reserve for WINDOWS - SET(CMAKE_REQUIRED_FLAGS "/arch:AVX2") + set(MMX_FLAG "-mmmx") + set(SSE2_FLAG "-msse2") + set(SSE3_FLAG "-msse3") + SET(AVX_FLAG "-mavx") + SET(AVX2_FLAG "-mavx2") +ELSEIF(MSVC) + set(MMX_FLAG "/arch:MMX") + set(SSE2_FLAG "/arch:SSE2") + set(SSE3_FLAG "/arch:SSE3") + SET(AVX_FLAG "/arch:AVX") + SET(AVX2_FLAG "/arch:AVX2") ENDIF() +# Check MMX +set(CMAKE_REQUIRED_FLAGS ${MMX_FLAG}) CHECK_CXX_SOURCE_RUNS(" -#include +#include int main() { - __m256i a = _mm256_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4); - __m256i result = _mm256_abs_epi32 (a); + _mm_setzero_si64(); return 0; -}" FIND_AVX_20) +}" MMX_FOUND) -# Check AVX -SET(CMAKE_REQUIRED_FLAGS) -IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - SET(CMAKE_REQUIRED_FLAGS "-mavx") -ELSEIF(MSVC AND NOT CMAKE_CL_64) - SET(CMAKE_REQUIRED_FLAGS "/arch:AVX") -endif() +# Check SSE2 +set(CMAKE_REQUIRED_FLAGS ${SSE2_FLAG}) +CHECK_CXX_SOURCE_RUNS(" +#include +int main() +{ + _mm_setzero_si128(); + return 0; +}" SSE2_FOUND) +# Check SSE3 +set(CMAKE_REQUIRED_FLAGS ${SSE3_FLAG}) +CHECK_CXX_SOURCE_RUNS(" +#include +int main() +{ + __m128d a = _mm_set1_pd(6.28); + __m128d b = _mm_set1_pd(3.14); + __m128d result = _mm_addsub_pd(a, b); + result = _mm_movedup_pd(result); + return 0; +}" SSE3_FOUND) + +# Check AVX +set(CMAKE_REQUIRED_FLAGS ${AVX_FLAG}) CHECK_CXX_SOURCE_RUNS(" #include int main() @@ -41,25 +60,17 @@ int main() __m256 b = _mm256_set_ps (1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f); __m256 result = _mm256_add_ps (a, b); return 0; -}" FIND_AVX_10) - -IF(${FIND_AVX_20}) - IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - SET(AVX_FLAGS "${AVX_FLAGS} -mavx2") - ELSEIF(MSVC) - SET(AVX_FLAGS "${AVX_FLAGS} /arch:AVX2") 
- ENDIF() -ENDIF() +}" AVX_FOUND) -IF(${FIND_AVX_10}) - IF(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - SET(AVX_FLAGS "${AVX_FLAGS} -mavx") - ELSEIF(MSVC) - SET(AVX_FLAGS "${AVX_FLAGS} /arch:AVX") - ENDIF() -ENDIF() +# Check AVX 2 +set(CMAKE_REQUIRED_FLAGS ${AVX2_FLAG}) +CHECK_CXX_SOURCE_RUNS(" +#include +int main() +{ + __m256i a = _mm256_set_epi32 (-1, 2, -3, 4, -1, 2, -3, 4); + __m256i result = _mm256_abs_epi32 (a); + return 0; +}" AVX2_FOUND) -IF(${FIND_AVX_10}) - SET(AVX_FOUND TRUE) - MESSAGE(STATUS "Find CPU supports ${AVX_FLAGS}.") -ENDIF() +mark_as_advanced(MMX_FOUND SSE2_FOUND SSE3_FOUND AVX_FOUND AVX2_FOUND) From e6c83f4ec058c65b3ded7605a1c85910caf7a0b0 Mon Sep 17 00:00:00 2001 From: luotao1 Date: Wed, 9 Nov 2016 14:59:02 +0800 Subject: [PATCH 115/180] some tiny fixs (#406) * some tiny fixs * use VLOG(3) --- paddle/cuda/src/hl_dso_loader.cc | 2 +- python/paddle/trainer_config_helpers/tests/configs/.gitignore | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/cuda/src/hl_dso_loader.cc b/paddle/cuda/src/hl_dso_loader.cc index c0b5d6e357fc7..b564b96903368 100644 --- a/paddle/cuda/src/hl_dso_loader.cc +++ b/paddle/cuda/src/hl_dso_loader.cc @@ -48,7 +48,7 @@ static inline std::string join(const std::string& part1, const std::string& part static inline void GetDsoHandleFromDefaultPath( std::string& dso_path, void** dso_handle, int dynload_flags) { - LOG(INFO) << "Try to find cuda library: " << dso_path + VLOG(3) << "Try to find cuda library: " << dso_path << " from default system path."; // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH *dso_handle = dlopen(dso_path.c_str(), dynload_flags); diff --git a/python/paddle/trainer_config_helpers/tests/configs/.gitignore b/python/paddle/trainer_config_helpers/tests/configs/.gitignore index 52378fe7a4865..eb646b4a71ec1 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/.gitignore +++ b/python/paddle/trainer_config_helpers/tests/configs/.gitignore @@ -1 +1 @@ -*protostr +protostr/*.unitest From bd50f93e63df045130bc8d95017fc9e07601f415 Mon Sep 17 00:00:00 2001 From: wangkuiyi Date: Tue, 8 Nov 2016 23:55:30 -0800 Subject: [PATCH 116/180] [Work in Progress] Update cluster_train.md (#391) Update cluster_train.md for easier understanding --- doc/cluster/opensource/cluster_train.md | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/doc/cluster/opensource/cluster_train.md b/doc/cluster/opensource/cluster_train.md index 4763ede39b049..cb493a88f0318 100644 --- a/doc/cluster/opensource/cluster_train.md +++ b/doc/cluster/opensource/cluster_train.md @@ -1,26 +1,24 @@ -# Cluster Training +# Distributed Training -We provide some simple scripts ```paddle/scripts/cluster_train``` to help you to launch cluster training Job to harness PaddlePaddle's distributed trainning. For MPI and other cluster scheduler refer this naive script to implement more robust cluster training platform by yourself. +In this article, we explain how to run distributed Paddle training jobs on clusters. We will create the distributed version of the single-process training example, [recommendation](https://github.com/baidu/Paddle/tree/develop/demo/recommendation). -The following cluster demo is based on RECOMMENDATION local training demo in PaddlePaddle ```demo/recommendation``` directory. Assuming you enter the ```paddle/scripts/cluster_train/``` directory. 
+[Scripts](https://github.com/baidu/Paddle/tree/develop/paddle/scripts/cluster_train) used in this article launch distributed jobs via SSH. They also work as a reference for users running more sophisticated cluster management systems like MPI and Kubernetes. -## Pre-requirements +## Prerequisite -Firstly, +1. Aforementioned scripts use a Python library [fabric](http://www.fabfile.org/) to run SSH commands. We can use `pip` to install fabric: -```bash + ```bash pip install fabric -``` - -Secondly, go through installing scripts to install PaddlePaddle at all nodes to make sure demo can run as local mode. For CUDA enabled training, we assume that CUDA is installed in ```/usr/local/cuda```, otherwise missed cuda runtime libraries error could be reported at cluster runtime. In one word, the local training environment should be well prepared for the simple scripts. + ``` -Then you should prepare same ROOT_DIR directory in all nodes. ROOT_DIR is from in cluster_train/conf.py. Assuming that the ROOT_DIR = /home/paddle, you can create ```paddle``` user account as well, at last ```paddle.py``` can ssh connections to all nodes with ```paddle``` user automatically. +1. We need to install PaddlePaddle on all nodes in the cluster. To enable GPUs, we need to install CUDA in `/usr/local/cuda`; otherwise Paddle would report errors at runtime. -At last you can create ssh mutual trust relationship between all nodes for easy ssh login, otherwise ```password``` should be provided at runtime from ```paddle.py```. +1. Set the `ROOT_DIR` variable in [`cluster_train/conf.py`] on all nodes. For convenience, we often create a Unix user `paddle` on all nodes and set `ROOT_DIR=/home/paddle`. In this way, we can write public SSH keys into `/home/paddle/.ssh/authorized_keys` so that user `paddle` can SSH to all nodes without password. ## Prepare Job Workspace -```Job workspace``` is defined as one package directory which contains dependency libraries, train data, test data, model config file and all other related file dependencies. +We refer to the directory where we put dependent libraries, config files, etc., as *workspace*. These ```train/test``` data should be prepared before launching cluster job. To satisfy the requirement that train/test data are placed in different directory from workspace, PADDLE refers train/test data according to index file named as ```train.list/test.list``` which are used in model config file. So the train/test data also contains train.list/test.list two list file. All local training demo already provides scripts to help you create these two files, and all nodes in cluster job will handle files with same logical code in normal condition. From 05204af1f2f1cb549c40880893ca0e2eb47e588f Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Wed, 9 Nov 2016 16:12:52 +0800 Subject: [PATCH 117/180] Fix memory leak in image classification demo, which is caused by dataprovider (#323) * the memory leak is inside one pass. 
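Concretely, the old provider loaded one whole pickled batch per call and yielded Python lists, while the patched provider reads the list file itself, loads one batch file at a time, and yields numpy float32 arrays. A simplified sketch of that streaming pattern, in plain Python rather than the PaddlePaddle provider decorator (the list-file name and batch layout are illustrative assumptions):

```python
import cPickle  # use `import pickle` on Python 3
import random

def stream_samples(list_file):
    """Yield (image, label) pairs while keeping only one batch file in memory."""
    with open(list_file) as f:
        batch_files = [line.strip() for line in f]
    random.shuffle(batch_files)
    for name in batch_files:
        with open(name, 'rb') as f:
            # assumed layout: {'images': [...], 'labels': [...]}
            batch = cPickle.load(f)
        for img, label in zip(batch['images'], batch['labels']):
            # nothing from previously processed files stays referenced here
            yield img, int(label)
```
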
--- demo/image_classification/.gitignore | 2 + .../data/download_cifar.sh | 0 demo/image_classification/image_provider.py | 39 +++++++++++-------- demo/image_classification/preprocess.py | 2 + demo/image_classification/preprocess.sh | 3 ++ demo/image_classification/vgg_16_cifar.py | 4 +- 6 files changed, 31 insertions(+), 19 deletions(-) mode change 100644 => 100755 demo/image_classification/data/download_cifar.sh diff --git a/demo/image_classification/.gitignore b/demo/image_classification/.gitignore index 76961dd1436f8..6a05b8f6632db 100644 --- a/demo/image_classification/.gitignore +++ b/demo/image_classification/.gitignore @@ -5,3 +5,5 @@ plot.png train.log image_provider_copy_1.py *pyc +train.list +test.list diff --git a/demo/image_classification/data/download_cifar.sh b/demo/image_classification/data/download_cifar.sh old mode 100644 new mode 100755 diff --git a/demo/image_classification/image_provider.py b/demo/image_classification/image_provider.py index 9e2f8b8949b39..305efbcdc6bb1 100644 --- a/demo/image_classification/image_provider.py +++ b/demo/image_classification/image_provider.py @@ -58,24 +58,29 @@ def hook(settings, img_size, mean_img_size, num_classes, color, meta, use_jpeg, settings.logger.info('DataProvider Initialization finished') -@provider(init_hook=hook) -def processData(settings, file_name): +@provider(init_hook=hook, min_pool_size=0) +def processData(settings, file_list): """ The main function for loading data. Load the batch, iterate all the images and labels in this batch. - file_name: the batch file name. + file_list: the batch file list. """ - data = cPickle.load(io.open(file_name, 'rb')) - indexes = list(range(len(data['images']))) - if settings.is_train: - random.shuffle(indexes) - for i in indexes: - if settings.use_jpeg == 1: - img = image_util.decode_jpeg(data['images'][i]) - else: - img = data['images'][i] - img_feat = image_util.preprocess_img(img, settings.img_mean, - settings.img_size, settings.is_train, - settings.color) - label = data['labels'][i] - yield img_feat.tolist(), int(label) + with open(file_list, 'r') as fdata: + lines = [line.strip() for line in fdata] + random.shuffle(lines) + for file_name in lines: + with io.open(file_name.strip(), 'rb') as file: + data = cPickle.load(file) + indexes = list(range(len(data['images']))) + if settings.is_train: + random.shuffle(indexes) + for i in indexes: + if settings.use_jpeg == 1: + img = image_util.decode_jpeg(data['images'][i]) + else: + img = data['images'][i] + img_feat = image_util.preprocess_img(img, settings.img_mean, + settings.img_size, settings.is_train, + settings.color) + label = data['labels'][i] + yield img_feat.astype('float32'), int(label) diff --git a/demo/image_classification/preprocess.py b/demo/image_classification/preprocess.py index 0286a5d7e9dc8..fe7ea19bf0277 100755 --- a/demo/image_classification/preprocess.py +++ b/demo/image_classification/preprocess.py @@ -35,6 +35,8 @@ def option_parser(): data_creator = ImageClassificationDatasetCreater(data_dir, processed_image_size, color) + data_creator.train_list_name = "train.txt" + data_creator.test_list_name = "test.txt" data_creator.num_per_batch = 1000 data_creator.overwrite = True data_creator.create_batches() diff --git a/demo/image_classification/preprocess.sh b/demo/image_classification/preprocess.sh index dfe3eb95d1ab8..e3e86ff10675c 100755 --- a/demo/image_classification/preprocess.sh +++ b/demo/image_classification/preprocess.sh @@ -17,3 +17,6 @@ set -e data_dir=./data/cifar-out python preprocess.py -i $data_dir -s 32 
-c 1 + +echo "data/cifar-out/batches/train.txt" > train.list +echo "data/cifar-out/batches/test.txt" > test.list diff --git a/demo/image_classification/vgg_16_cifar.py b/demo/image_classification/vgg_16_cifar.py index e8b8af4bd313d..edd6988c48acd 100755 --- a/demo/image_classification/vgg_16_cifar.py +++ b/demo/image_classification/vgg_16_cifar.py @@ -25,8 +25,8 @@ 'img_size': 32,'num_classes': 10, 'use_jpeg': 1,'color': "color"} - define_py_data_sources2(train_list=data_dir+"train.list", - test_list=data_dir+'test.list', + define_py_data_sources2(train_list="train.list", + test_list="train.list", module='image_provider', obj='processData', args=args) From 93dc44c9df5b56154e4da8fbacfbffb174352ebf Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 17:53:49 +0800 Subject: [PATCH 118/180] Update --- .../tests/configs/img_layers.protostr | 176 +++++ .../tests/configs/last_first_seq.protostr | 69 ++ .../tests/configs/layer_activations.protostr | 423 ++++++++++++ .../tests/configs/projections.protostr | 315 +++++++++ .../tests/configs/shared_fc.protostr | 0 .../tests/configs/shared_lstm.protostr | 393 +++++++++++ .../tests/configs/simple_rnn_layers.protostr | 418 +++++++++++ .../configs/test_bilinear_interp.protostr | 125 ++++ .../tests/configs/test_cost_layers.protostr | 289 ++++++++ .../test_cost_layers_with_weight.protostr | 111 +++ .../tests/configs/test_expand_layer.protostr | 56 ++ .../tests/configs/test_fc.protostr | 98 +++ .../configs/test_grumemory_layer.protostr | 51 ++ .../tests/configs/test_hsigmoid.protostr | 62 ++ .../configs/test_lstmemory_layer.protostr | 53 ++ .../tests/configs/test_maxout.protostr | 0 .../tests/configs/test_ntm_layers.protostr | 225 ++++++ .../tests/configs/test_print_layer.protostr | 26 + .../tests/configs/test_rnn_group.protostr | 650 ++++++++++++++++++ .../configs/test_sequence_pooling.protostr | 111 +++ .../tests/configs/unused_layers.protostr | 27 + .../tests/configs/util_layers.protostr | 81 +++ 22 files changed, 3759 insertions(+) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/projections.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr create mode 100644 
python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr create mode 100644 python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr new file mode 100644 index 0000000000000..899171ff1d00b --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr @@ -0,0 +1,176 @@ +type: "nn" +layers { + name: "image" + type: "data" + size: 65536 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconv" + size: 3297856 + active_type: "" + inputs { + input_layer_name: "image" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 32 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 1 + output_x: 227 + img_size: 256 + caffe_mode: true + filter_size_y: 32 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 64 + shared_biases: true +} +layers { + name: "__batch_norm_0__" + type: "batch_norm" + size: 3297856 + active_type: "relu" + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w0" + image_conf { + channels: 64 + img_size: 227 + } + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w1" + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w2" + } + bias_parameter_name: "___batch_norm_0__.wbias" + moving_average_fraction: 0.899999976158 +} +layers { + name: "__crmnorm_0__" + type: "norm" + size: 3297856 + active_type: "" + inputs { + input_layer_name: "__batch_norm_0__" + norm_conf { + norm_type: "cmrnorm-projection" + channels: 64 + size: 32 + scale: 0.000399999989895 + pow: 0.75 + output_x: 227 + img_size: 227 + blocked: false + } + } +} +layers { + name: "__pool_0__" + type: "pool" + size: 2458624 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + pool_conf { + pool_type: "max-projection" + channels: 64 + size_x: 32 + stride: 1 + output_x: 196 + img_size: 227 + padding: 0 + size_y: 32 + stride_y: 1 + output_y: 196 + img_size_y: 227 + padding_y: 0 + } + } +} +parameters { + name: "___conv_0__.w0" + size: 65536 + initial_mean: 0.0 + initial_std: 0.0441941730678 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 64 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w0" + size: 64 + initial_mean: 1.0 + initial_std: 0.0 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w1" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.w2" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 
1 + dims: 64 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "image" +output_layer_names: "__pool_0__" +output_layer_names: "__crmnorm_0__" +sub_models { + name: "root" + layer_names: "image" + layer_names: "__conv_0__" + layer_names: "__batch_norm_0__" + layer_names: "__crmnorm_0__" + layer_names: "__pool_0__" + input_layer_names: "image" + output_layer_names: "__pool_0__" + output_layer_names: "__crmnorm_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr new file mode 100644 index 0000000000000..7b2911f8e367e --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr @@ -0,0 +1,69 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + select_first: true + trans_type: "seq" +} +layers { + name: "__first_seq_1__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + trans_type: "seq" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 30 + active_type: "linear" + inputs { + input_layer_name: "data" + } + trans_type: "non-seq" +} +input_layer_names: "data" +output_layer_names: "__first_seq_0__" +output_layer_names: "__first_seq_1__" +output_layer_names: "__last_seq_0__" +output_layer_names: "__last_seq_1__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__first_seq_0__" + layer_names: "__first_seq_1__" + layer_names: "__last_seq_0__" + layer_names: "__last_seq_1__" + input_layer_names: "data" + output_layer_names: "__first_seq_0__" + output_layer_names: "__first_seq_1__" + output_layer_names: "__last_seq_0__" + output_layer_names: "__last_seq_1__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr new file mode 100644 index 0000000000000..8ae2421727efe --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr @@ -0,0 +1,423 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "layer_0" + type: "fc" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_0.w0" + } + bias_parameter_name: "_layer_0.wbias" +} +layers { + name: "layer_1" + type: "fc" + size: 100 + active_type: "sigmoid" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_1.w0" + } + bias_parameter_name: "_layer_1.wbias" +} +layers { + name: "layer_2" + type: "fc" + size: 100 + active_type: "softmax" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_2.w0" + } + bias_parameter_name: "_layer_2.wbias" +} +layers { + name: "layer_3" + type: "fc" + size: 100 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_3.w0" + } + bias_parameter_name: "_layer_3.wbias" +} +layers { + name: "layer_4" + type: "fc" + size: 100 + active_type: "" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_4.w0" + } + 
bias_parameter_name: "_layer_4.wbias" +} +layers { + name: "layer_5" + type: "fc" + size: 100 + active_type: "exponential" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_5.w0" + } + bias_parameter_name: "_layer_5.wbias" +} +layers { + name: "layer_6" + type: "fc" + size: 100 + active_type: "relu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_6.w0" + } + bias_parameter_name: "_layer_6.wbias" +} +layers { + name: "layer_7" + type: "fc" + size: 100 + active_type: "brelu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_7.w0" + } + bias_parameter_name: "_layer_7.wbias" +} +layers { + name: "layer_8" + type: "fc" + size: 100 + active_type: "softrelu" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_8.w0" + } + bias_parameter_name: "_layer_8.wbias" +} +layers { + name: "layer_9" + type: "fc" + size: 100 + active_type: "stanh" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_9.w0" + } + bias_parameter_name: "_layer_9.wbias" +} +layers { + name: "layer_10" + type: "fc" + size: 100 + active_type: "abs" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_10.w0" + } + bias_parameter_name: "_layer_10.wbias" +} +layers { + name: "layer_11" + type: "fc" + size: 100 + active_type: "square" + inputs { + input_layer_name: "input" + input_parameter_name: "_layer_11.w0" + } + bias_parameter_name: "_layer_11.wbias" +} +parameters { + name: "_layer_0.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_0.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_1.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_1.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_2.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_2.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_3.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_3.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_4.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_4.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_5.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_5.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_6.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 
100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_6.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_7.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_7.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_8.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_8.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_9.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_9.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_10.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_10.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_layer_11.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_layer_11.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "input" +output_layer_names: "layer_0" +output_layer_names: "layer_1" +output_layer_names: "layer_2" +output_layer_names: "layer_3" +output_layer_names: "layer_4" +output_layer_names: "layer_5" +output_layer_names: "layer_6" +output_layer_names: "layer_7" +output_layer_names: "layer_8" +output_layer_names: "layer_9" +output_layer_names: "layer_10" +output_layer_names: "layer_11" +sub_models { + name: "root" + layer_names: "input" + layer_names: "layer_0" + layer_names: "layer_1" + layer_names: "layer_2" + layer_names: "layer_3" + layer_names: "layer_4" + layer_names: "layer_5" + layer_names: "layer_6" + layer_names: "layer_7" + layer_names: "layer_8" + layer_names: "layer_9" + layer_names: "layer_10" + layer_names: "layer_11" + input_layer_names: "input" + output_layer_names: "layer_0" + output_layer_names: "layer_1" + output_layer_names: "layer_2" + output_layer_names: "layer_3" + output_layer_names: "layer_4" + output_layer_names: "layer_5" + output_layer_names: "layer_6" + output_layer_names: "layer_7" + output_layer_names: "layer_8" + output_layer_names: "layer_9" + output_layer_names: "layer_10" + output_layer_names: "layer_11" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.protostr b/python/paddle/trainer_config_helpers/tests/configs/projections.protostr new file mode 100644 index 0000000000000..a901af6b42431 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/projections.protostr @@ -0,0 +1,315 @@ +type: "nn" +layers { + name: "test" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__embedding_0__" + type: "mixed" + size: 256 + 
active_type: "" + inputs { + input_layer_name: "test" + input_parameter_name: "___embedding_0__.w0" + proj_conf { + type: "table" + name: "___embedding_0__.w0" + input_size: 100 + output_size: 256 + } + } +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__embedding_0__" + input_parameter_name: "___mixed_0__.w0" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 256 + output_size: 100 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_0__" + input_parameter_name: "___mixed_1__.w0" + proj_conf { + type: "table" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_2__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_1__" + proj_conf { + type: "identity" + name: "___mixed_2__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_3__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_2__" + input_parameter_name: "___mixed_3__.w0" + proj_conf { + type: "dot_mul" + name: "___mixed_3__.w0" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__mixed_4__" + type: "mixed" + size: 300 + active_type: "" + inputs { + input_layer_name: "__mixed_3__" + input_parameter_name: "___mixed_4__.w0" + proj_conf { + type: "context" + name: "___mixed_4__.w0" + input_size: 100 + output_size: 300 + context_start: -1 + context_length: 3 + trainable_padding: true + } + } +} +layers { + name: "__mixed_5__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_2__" + } + inputs { + input_layer_name: "__mixed_3__" + } + operator_confs { + type: "dot_mul" + input_indices: 0 + input_indices: 1 + input_sizes: 100 + input_sizes: 100 + output_size: 100 + dotmul_scale: 1.0 + } +} +layers { + name: "img" + type: "data" + size: 1024 + active_type: "" +} +layers { + name: "filter" + type: "data" + size: 576 + active_type: "" +} +layers { + name: "__mixed_6__" + type: "mixed" + size: 57600 + active_type: "" + inputs { + input_layer_name: "img" + } + inputs { + input_layer_name: "filter" + } + operator_confs { + type: "conv" + input_indices: 0 + input_indices: 1 + input_sizes: 1024 + input_sizes: 576 + output_size: 57600 + conv_conf { + filter_size: 3 + channels: 1 + stride: 1 + padding: 0 + groups: 1 + filter_channels: 1 + output_x: 30 + img_size: 32 + caffe_mode: true + filter_size_y: 3 + padding_y: 0 + stride_y: 1 + } + num_filters: 64 + } +} +layers { + name: "__mixed_7__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_4__" + input_parameter_name: "___mixed_7__.w0" + proj_conf { + type: "fc" + name: "___mixed_7__.w0" + input_size: 300 + output_size: 100 + } + } + inputs { + input_layer_name: "__mixed_5__" + input_parameter_name: "___mixed_7__.w1" + proj_conf { + type: "trans_fc" + name: "___mixed_7__.w1" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__mixed_6__" + input_parameter_name: "___mixed_7__.w2" + proj_conf { + type: "fc" + name: "___mixed_7__.w2" + input_size: 57600 + output_size: 100 + } + } + drop_rate: 0.5 +} +parameters { + name: "___embedding_0__.w0" + size: 25600 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 256 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_0__.w0" + size: 25600 + initial_mean: 0.0 + initial_std: 0.0625 + dims: 256 + dims: 100 + 
initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_1__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_3__.w0" + size: 100 + initial_mean: 0.0 + initial_std: 1.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_4__.w0" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 2 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___mixed_7__.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.0577350258827 + dims: 300 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_7__.w1" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_7__.w2" + size: 5760000 + initial_mean: 0.0 + initial_std: 0.00416666688398 + dims: 57600 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "test" +input_layer_names: "img" +input_layer_names: "filter" +output_layer_names: "__mixed_7__" +sub_models { + name: "root" + layer_names: "test" + layer_names: "__embedding_0__" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__mixed_2__" + layer_names: "__mixed_3__" + layer_names: "__mixed_4__" + layer_names: "__mixed_5__" + layer_names: "img" + layer_names: "filter" + layer_names: "__mixed_6__" + layer_names: "__mixed_7__" + input_layer_names: "test" + input_layer_names: "img" + input_layer_names: "filter" + output_layer_names: "__mixed_7__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr new file mode 100644 index 0000000000000..26eed43a459f5 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr @@ -0,0 +1,393 @@ +type: "recurrent_nn" +layers { + name: "data_a" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "data_b" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "data_a" + input_parameter_name: "mixed_param" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "data_b" + input_parameter_name: "mixed_param" + proj_conf { + type: "fc" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_0__@__lstm_group_0___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + 
inputs { + input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_0___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + input_parameter_name: "lstm_param" + proj_conf { + type: "fc" + name: "___lstm_group_0___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + } + bias_parameter_name: "lstm_bias" + active_gate_type: "sigmoid" + active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_1__@__lstm_group_1___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "__mixed_1__@__lstm_group_1___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_1___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + input_parameter_name: "lstm_param" + proj_conf { + type: "fc" + name: "___lstm_group_1___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + } + bias_parameter_name: "lstm_bias" + active_gate_type: "sigmoid" + active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_1__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_0__" + } + trans_type: "non-seq" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_1__" + } + trans_type: "non-seq" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 10 + active_type: "softmax" + inputs { + input_layer_name: "__last_seq_0__" + 
input_parameter_name: "softmax_param" + } + inputs { + input_layer_name: "__last_seq_1__" + input_parameter_name: "softmax_param" + } +} +layers { + name: "label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__cost_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + coeff: 1.0 +} +parameters { + name: "mixed_param" + size: 40000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "lstm_param" + size: 40000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "lstm_bias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "softmax_param" + size: 1000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 10 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data_a" +input_layer_names: "data_b" +input_layer_names: "label" +output_layer_names: "__cost_0__" +evaluators { + name: "classification_error_evaluator" + type: "classification_error" + input_layers: "__fc_layer_0__" + input_layers: "label" +} +sub_models { + name: "root" + layer_names: "data_a" + layer_names: "data_b" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__" + layer_names: "__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__" + layer_names: "__last_seq_0__" + layer_names: "__last_seq_1__" + layer_names: "__fc_layer_0__" + layer_names: "label" + layer_names: "__cost_0__" + input_layer_names: "data_a" + input_layer_names: "data_b" + input_layer_names: "label" + output_layer_names: "__cost_0__" + evaluator_names: "classification_error_evaluator" + is_recurrent_layer_group: false +} +sub_models { + name: "__lstm_group_0___recurrent_group" + layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_0__" + link_name: "__mixed_0__@__lstm_group_0___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__lstm_group_1___recurrent_group" + layer_names: "__mixed_1__@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + layer_names: 
"__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1__@__lstm_group_1___recurrent_group" + layer_names: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_1___state@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_1__" + link_name: "__mixed_1__@__lstm_group_1___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" + link_name: "__lstm_group_1__" + has_subseq: false + } + target_inlinkid: -1 +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr new file mode 100644 index 0000000000000..57445243bd06f --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr @@ -0,0 +1,418 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "__recurrent_layer_0__" + type: "recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___recurrent_layer_0__.w0" + } + bias_parameter_name: "___recurrent_layer_0__.wbias" + reversed: false +} +layers { + name: "__recurrent_layer_1__" + type: "recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___recurrent_layer_1__.w0" + } + bias_parameter_name: "___recurrent_layer_1__.wbias" + reversed: true +} +layers { + name: "__fc_layer_1__" + type: "fc" + size: 800 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_1__.w0" + } +} +layers { + name: "__lstmemory_0__" + type: "lstmemory" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_1__" + input_parameter_name: "___lstmemory_0__.w0" + } + bias_parameter_name: "___lstmemory_0__.wbias" + reversed: false + active_gate_type: "sigmoid" + active_state_type: "tanh" +} +layers { + name: "__fc_layer_2__" + type: "fc" + size: 800 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_2__.w0" + } +} +layers { + name: "__lstmemory_1__" + type: "lstmemory" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_2__" + input_parameter_name: "___lstmemory_1__.w0" + } + bias_parameter_name: "___lstmemory_1__.wbias" + reversed: true + active_gate_type: "sigmoid" + active_state_type: "tanh" +} +layers { + name: "__fc_layer_3__" + type: "fc" + size: 600 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_3__.w0" + } +} +layers { + name: "__gru_0__" + type: "gated_recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_3__" + input_parameter_name: "___gru_0__.w0" + } + bias_parameter_name: "___gru_0__.wbias" + reversed: false + active_gate_type: 
"sigmoid" +} +layers { + name: "__fc_layer_4__" + type: "fc" + size: 600 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___fc_layer_4__.w0" + } +} +layers { + name: "__gru_1__" + type: "gated_recurrent" + size: 200 + active_type: "sigmoid" + inputs { + input_layer_name: "__fc_layer_4__" + input_parameter_name: "___gru_1__.w0" + } + bias_parameter_name: "___gru_1__.wbias" + reversed: true + active_gate_type: "sigmoid" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__recurrent_layer_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__recurrent_layer_1__" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__lstmemory_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_1__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__lstmemory_1__" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__last_seq_2__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__gru_0__" + } + trans_type: "non-seq" +} +layers { + name: "__first_seq_2__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "__gru_1__" + } + select_first: true + trans_type: "non-seq" +} +parameters { + name: "___fc_layer_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___recurrent_layer_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___recurrent_layer_0__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___recurrent_layer_1__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___recurrent_layer_1__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_1__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 800 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.wbias" + size: 1400 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1400 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_2__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 800 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_1__.w0" + size: 160000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} 
+parameters { + name: "___lstmemory_1__.wbias" + size: 1400 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1400 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_3__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.wbias" + size: 600 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 600 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_4__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_1__.w0" + size: 120000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 600 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_1__.wbias" + size: 600 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 600 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__last_seq_0__" +output_layer_names: "__first_seq_0__" +output_layer_names: "__last_seq_1__" +output_layer_names: "__first_seq_1__" +output_layer_names: "__last_seq_2__" +output_layer_names: "__first_seq_2__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__fc_layer_0__" + layer_names: "__recurrent_layer_0__" + layer_names: "__recurrent_layer_1__" + layer_names: "__fc_layer_1__" + layer_names: "__lstmemory_0__" + layer_names: "__fc_layer_2__" + layer_names: "__lstmemory_1__" + layer_names: "__fc_layer_3__" + layer_names: "__gru_0__" + layer_names: "__fc_layer_4__" + layer_names: "__gru_1__" + layer_names: "__last_seq_0__" + layer_names: "__first_seq_0__" + layer_names: "__last_seq_1__" + layer_names: "__first_seq_1__" + layer_names: "__last_seq_2__" + layer_names: "__first_seq_2__" + input_layer_names: "data" + output_layer_names: "__last_seq_0__" + output_layer_names: "__first_seq_0__" + output_layer_names: "__last_seq_1__" + output_layer_names: "__first_seq_1__" + output_layer_names: "__last_seq_2__" + output_layer_names: "__first_seq_2__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr new file mode 100644 index 0000000000000..278088d4abd50 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr @@ -0,0 +1,125 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 2304 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconv" + size: 36864 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 3 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 1 + output_x: 48 + img_size: 48 + caffe_mode: true + filter_size_y: 3 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 16 + shared_biases: true +} +layers { + name: "__bilinear_interp_layer_0__" + type: "bilinear_interp" + size: 36864 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + bilinear_interp_conf { + img_size_x: 32 + img_size_y: 32 + out_size_x: 64 + out_size_y: 64 + num_channels: 16 + } + } +} +layers { + name: 
"__pool_0__" + type: "pool" + size: 9216 + active_type: "" + inputs { + input_layer_name: "__bilinear_interp_layer_0__" + pool_conf { + pool_type: "max-projection" + channels: 4 + size_x: 2 + stride: 2 + output_x: 48 + img_size: 96 + padding: 0 + size_y: 2 + stride_y: 2 + output_y: 48 + img_size_y: 96 + padding_y: 0 + } + } +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 384 + active_type: "tanh" + inputs { + input_layer_name: "__pool_0__" + input_parameter_name: "___fc_layer_0__.w0" + } +} +parameters { + name: "___conv_0__.w0" + size: 144 + initial_mean: 0.0 + initial_std: 0.471404522657 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 16 + initial_mean: 0.0 + initial_std: 0.0 + dims: 16 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_0__.w0" + size: 3538944 + initial_mean: 0.0 + initial_std: 0.0104166669771 + dims: 9216 + dims: 384 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data" +output_layer_names: "__fc_layer_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__conv_0__" + layer_names: "__bilinear_interp_layer_0__" + layer_names: "__pool_0__" + layer_names: "__fc_layer_0__" + input_layer_names: "data" + output_layer_names: "__fc_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr new file mode 100644 index 0000000000000..c37586f4068e4 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr @@ -0,0 +1,289 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "labels" + type: "data" + size: 5000 + active_type: "" +} +layers { + name: "probs" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "xe-label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__ctc_layer_0__" + type: "ctc" + size: 5001 + active_type: "" + inputs { + input_layer_name: "input" + } + inputs { + input_layer_name: "labels" + } + norm_by_times: false +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 4 + active_type: "tanh" + inputs { + input_layer_name: "input" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "crf_label" + type: "data" + size: 4 + active_type: "" +} +layers { + name: "__crf_layer_0__" + type: "crf" + size: 4 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + input_parameter_name: "___crf_layer_0__.w0" + } + inputs { + input_layer_name: "crf_label" + } + coeff: 1.0 +} +layers { + name: "left" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "right" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__rank_cost_0__" + type: "rank-cost" + size: 1 + active_type: "" + inputs { + input_layer_name: "left" + } + inputs { + input_layer_name: "right" + } + inputs { + input_layer_name: "label" + } + coeff: 1.0 +} +layers { + name: "list_feature" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "list_scores" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__lambda_cost_0__" + type: "lambda_cost" + size: 1 + active_type: "" + inputs { + input_layer_name: "list_feature" + } + inputs { + input_layer_name: "list_scores" + } + NDCG_num: 5 + max_sort_size: -1 
+} +layers { + name: "__cross_entropy_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + coeff: 1.0 +} +layers { + name: "__cross_entropy_with_selfnorm_0__" + type: "multi_class_cross_entropy_with_selfnorm" + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + softmax_selfnorm_alpha: 0.10000000149 + coeff: 1.0 +} +layers { + name: "huber_probs" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "huber_label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__huber_cost_0__" + type: "huber" + size: 1 + active_type: "" + inputs { + input_layer_name: "huber_probs" + } + inputs { + input_layer_name: "huber_label" + } + coeff: 1.0 +} +layers { + name: "__multi_binary_label_cross_entropy_0__" + type: "multi_binary_label_cross_entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "probs" + } + inputs { + input_layer_name: "xe-label" + } + coeff: 1.0 +} +parameters { + name: "___fc_layer_0__.w0" + size: 800 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 4 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 4 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___crf_layer_0__.w0" + size: 24 + initial_mean: 0.0 + initial_std: 0.5 + dims: 4 + dims: 6 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "input" +input_layer_names: "labels" +input_layer_names: "crf_label" +input_layer_names: "left" +input_layer_names: "right" +input_layer_names: "label" +input_layer_names: "list_feature" +input_layer_names: "list_scores" +input_layer_names: "probs" +input_layer_names: "xe-label" +input_layer_names: "huber_probs" +input_layer_names: "huber_label" +output_layer_names: "__ctc_layer_0__" +output_layer_names: "__crf_layer_0__" +output_layer_names: "__rank_cost_0__" +output_layer_names: "__lambda_cost_0__" +output_layer_names: "__cross_entropy_0__" +output_layer_names: "__cross_entropy_with_selfnorm_0__" +output_layer_names: "__huber_cost_0__" +output_layer_names: "__multi_binary_label_cross_entropy_0__" +sub_models { + name: "root" + layer_names: "input" + layer_names: "labels" + layer_names: "probs" + layer_names: "xe-label" + layer_names: "__ctc_layer_0__" + layer_names: "__fc_layer_0__" + layer_names: "crf_label" + layer_names: "__crf_layer_0__" + layer_names: "left" + layer_names: "right" + layer_names: "label" + layer_names: "__rank_cost_0__" + layer_names: "list_feature" + layer_names: "list_scores" + layer_names: "__lambda_cost_0__" + layer_names: "__cross_entropy_0__" + layer_names: "__cross_entropy_with_selfnorm_0__" + layer_names: "huber_probs" + layer_names: "huber_label" + layer_names: "__huber_cost_0__" + layer_names: "__multi_binary_label_cross_entropy_0__" + input_layer_names: "input" + input_layer_names: "labels" + input_layer_names: "crf_label" + input_layer_names: "left" + input_layer_names: "right" + input_layer_names: "label" + input_layer_names: "list_feature" + input_layer_names: "list_scores" + input_layer_names: "probs" + input_layer_names: "xe-label" + input_layer_names: "huber_probs" + input_layer_names: "huber_label" + output_layer_names: "__ctc_layer_0__" + output_layer_names: "__crf_layer_0__" + output_layer_names: "__rank_cost_0__" + output_layer_names: "__lambda_cost_0__" + output_layer_names: 
"__cross_entropy_0__" + output_layer_names: "__cross_entropy_with_selfnorm_0__" + output_layer_names: "__huber_cost_0__" + output_layer_names: "__multi_binary_label_cross_entropy_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr new file mode 100644 index 0000000000000..de58f5c64969b --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr @@ -0,0 +1,111 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 300 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "weight" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 10 + active_type: "softmax" + inputs { + input_layer_name: "input" + input_parameter_name: "___fc_layer_0__.w0" + } + bias_parameter_name: "___fc_layer_0__.wbias" +} +layers { + name: "__cost_0__" + type: "multi-class-cross-entropy" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + inputs { + input_layer_name: "weight" + } + coeff: 1.0 +} +layers { + name: "__regression_cost_0__" + type: "square_error" + size: 1 + active_type: "" + inputs { + input_layer_name: "__fc_layer_0__" + } + inputs { + input_layer_name: "label" + } + inputs { + input_layer_name: "weight" + } + coeff: 1.0 +} +parameters { + name: "___fc_layer_0__.w0" + size: 3000 + initial_mean: 0.0 + initial_std: 0.0577350258827 + dims: 300 + dims: 10 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___fc_layer_0__.wbias" + size: 10 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 10 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "input" +input_layer_names: "label" +input_layer_names: "weight" +output_layer_names: "__cost_0__" +output_layer_names: "__regression_cost_0__" +evaluators { + name: "classification_error_evaluator" + type: "classification_error" + input_layers: "__fc_layer_0__" + input_layers: "label" + input_layers: "weight" +} +sub_models { + name: "root" + layer_names: "input" + layer_names: "label" + layer_names: "weight" + layer_names: "__fc_layer_0__" + layer_names: "__cost_0__" + layer_names: "__regression_cost_0__" + input_layer_names: "input" + input_layer_names: "label" + input_layer_names: "weight" + output_layer_names: "__cost_0__" + output_layer_names: "__regression_cost_0__" + evaluator_names: "classification_error_evaluator" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr new file mode 100644 index 0000000000000..f4b36052264bc --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr @@ -0,0 +1,56 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "data_seq" + type: "data" + size: 30 + active_type: "" +} +layers { + name: "__expand_layer_0__" + type: "expand" + size: 30 + active_type: "" + inputs { + input_layer_name: "data" + } + inputs { + input_layer_name: "data_seq" + } + trans_type: "seq" +} +layers { + name: "__expand_layer_1__" + type: "expand" + size: 30 + active_type: "" + inputs { + input_layer_name: "data" + } + inputs { + input_layer_name: "data_seq" + 
} + trans_type: "non-seq" +} +input_layer_names: "data" +input_layer_names: "data_seq" +output_layer_names: "__expand_layer_0__" +output_layer_names: "__expand_layer_1__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "data_seq" + layer_names: "__expand_layer_0__" + layer_names: "__expand_layer_1__" + input_layer_names: "data" + input_layer_names: "data_seq" + output_layer_names: "__expand_layer_0__" + output_layer_names: "__expand_layer_1__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr new file mode 100644 index 0000000000000..80b01246ba96f --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr @@ -0,0 +1,98 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__trans_layer_0__" + type: "trans" + size: 100 + active_type: "" + inputs { + input_layer_name: "data" + } +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__trans_layer_0__" + input_parameter_name: "___fc_layer_0__.w0" + } +} +layers { + name: "mask" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__selective_fc_layer_0__" + type: "selective_fc" + size: 100 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___selective_fc_layer_0__.w0" + } + inputs { + input_layer_name: "mask" + } + bias_parameter_name: "___selective_fc_layer_0__.wbias" + selective_fc_pass_generation: false + has_selected_colums: true + selective_fc_full_mul_ratio: 0.019999999553 +} +parameters { + name: "___fc_layer_0__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___selective_fc_layer_0__.w0" + size: 10000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + initial_strategy: 0 + initial_smart: true + is_sparse: false +} +parameters { + name: "___selective_fc_layer_0__.wbias" + size: 100 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 100 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +input_layer_names: "mask" +output_layer_names: "__fc_layer_0__" +output_layer_names: "__selective_fc_layer_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__trans_layer_0__" + layer_names: "__fc_layer_0__" + layer_names: "mask" + layer_names: "__selective_fc_layer_0__" + input_layer_names: "data" + input_layer_names: "mask" + output_layer_names: "__fc_layer_0__" + output_layer_names: "__selective_fc_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr new file mode 100644 index 0000000000000..81577910ccf34 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr @@ -0,0 +1,51 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 120 + active_type: "" +} +layers { + name: "__gru_0__" + type: "gated_recurrent" + size: 40 + active_type: "sigmoid" + inputs { + input_layer_name: "data" + input_parameter_name: "___gru_0__.w0" + } + bias_parameter_name: "___gru_0__.wbias" + reversed: true + active_gate_type: "tanh" +} +parameters { + name: "___gru_0__.w0" + size: 4800 + initial_mean: 0.0 + initial_std: 
0.158113881946 + dims: 40 + dims: 120 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___gru_0__.wbias" + size: 120 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 120 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__gru_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__gru_0__" + input_layer_names: "data" + output_layer_names: "__gru_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr new file mode 100644 index 0000000000000..e8cc61b8c5410 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr @@ -0,0 +1,62 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__hsigmoid_0__" + type: "hsigmoid" + size: 1 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___hsigmoid_0__.w0" + } + inputs { + input_layer_name: "label" + } + bias_parameter_name: "___hsigmoid_0__.wbias" + num_classes: 10 +} +parameters { + name: "___hsigmoid_0__.w0" + size: 900 + initial_mean: 0.0 + initial_std: 0.333333343267 + dims: 9 + dims: 100 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___hsigmoid_0__.wbias" + size: 9 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 9 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +input_layer_names: "label" +output_layer_names: "__hsigmoid_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "label" + layer_names: "__hsigmoid_0__" + input_layer_names: "data" + input_layer_names: "label" + output_layer_names: "__hsigmoid_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr new file mode 100644 index 0000000000000..8341cd2684746 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr @@ -0,0 +1,53 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 128 + active_type: "" +} +layers { + name: "__lstmemory_0__" + type: "lstmemory" + size: 32 + active_type: "tanh" + inputs { + input_layer_name: "data" + input_parameter_name: "___lstmemory_0__.w0" + } + bias_parameter_name: "___lstmemory_0__.wbias" + reversed: true + active_gate_type: "tanh" + active_state_type: "tanh" +} +parameters { + name: "___lstmemory_0__.w0" + size: 4096 + initial_mean: 0.0 + initial_std: 0.176776692271 + dims: 32 + dims: 32 + dims: 4 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstmemory_0__.wbias" + size: 224 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 224 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "data" +output_layer_names: "__lstmemory_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__lstmemory_0__" + input_layer_names: "data" + output_layer_names: "__lstmemory_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git 
a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr new file mode 100644 index 0000000000000..44400e2c3a23d --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr @@ -0,0 +1,225 @@ +type: "nn" +layers { + name: "w" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "a" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "b" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "c" + type: "data" + size: 200 + active_type: "" +} +layers { + name: "d" + type: "data" + size: 31 + active_type: "" +} +layers { + name: "__interpolation_layer_0__" + type: "interpolation" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__power_layer_0__" + type: "power" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } +} +layers { + name: "__scaling_layer_0__" + type: "scaling" + size: 100 + active_type: "" + inputs { + input_layer_name: "w" + } + inputs { + input_layer_name: "a" + } +} +layers { + name: "__cos_sim_0__" + type: "cos" + size: 1 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } + cos_scale: 5.0 +} +layers { + name: "__cos_sim_1__" + type: "cos_vm" + size: 2 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "c" + } + cos_scale: 5.0 +} +layers { + name: "__sum_to_one_norm_layer_0__" + type: "sum_to_one_norm" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } +} +layers { + name: "__conv_shift_layer_0__" + type: "conv_shift" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "d" + } +} +layers { + name: "__tensor_layer_0__" + type: "tensor" + size: 1000 + active_type: "" + inputs { + input_layer_name: "a" + input_parameter_name: "___tensor_layer_0__.w0" + } + inputs { + input_layer_name: "b" + } + bias_parameter_name: "___tensor_layer_0__.wbias" +} +layers { + name: "__slope_intercept_layer_0__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "a" + } + slope: 0.699999988079 + intercept: 0.899999976158 +} +layers { + name: "__linear_comb_layer_0__" + type: "convex_comb" + size: 2 + active_type: "" + inputs { + input_layer_name: "b" + } + inputs { + input_layer_name: "c" + } +} +parameters { + name: "___tensor_layer_0__.w0" + size: 10000000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 100 + dims: 1000 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___tensor_layer_0__.wbias" + size: 1000 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 1000 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "w" +input_layer_names: "a" +input_layer_names: "b" +input_layer_names: "c" +input_layer_names: "d" +output_layer_names: "__interpolation_layer_0__" +output_layer_names: "__power_layer_0__" +output_layer_names: "__scaling_layer_0__" +output_layer_names: "__cos_sim_0__" +output_layer_names: "__cos_sim_1__" +output_layer_names: "__sum_to_one_norm_layer_0__" +output_layer_names: "__conv_shift_layer_0__" +output_layer_names: "__tensor_layer_0__" +output_layer_names: "__slope_intercept_layer_0__" +output_layer_names: "__linear_comb_layer_0__" +sub_models { + name: "root" + layer_names: "w" + layer_names: "a" + 
layer_names: "b" + layer_names: "c" + layer_names: "d" + layer_names: "__interpolation_layer_0__" + layer_names: "__power_layer_0__" + layer_names: "__scaling_layer_0__" + layer_names: "__cos_sim_0__" + layer_names: "__cos_sim_1__" + layer_names: "__sum_to_one_norm_layer_0__" + layer_names: "__conv_shift_layer_0__" + layer_names: "__tensor_layer_0__" + layer_names: "__slope_intercept_layer_0__" + layer_names: "__linear_comb_layer_0__" + input_layer_names: "w" + input_layer_names: "a" + input_layer_names: "b" + input_layer_names: "c" + input_layer_names: "d" + output_layer_names: "__interpolation_layer_0__" + output_layer_names: "__power_layer_0__" + output_layer_names: "__scaling_layer_0__" + output_layer_names: "__cos_sim_0__" + output_layer_names: "__cos_sim_1__" + output_layer_names: "__sum_to_one_norm_layer_0__" + output_layer_names: "__conv_shift_layer_0__" + output_layer_names: "__tensor_layer_0__" + output_layer_names: "__slope_intercept_layer_0__" + output_layer_names: "__linear_comb_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr new file mode 100644 index 0000000000000..c402aff174ab7 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr @@ -0,0 +1,26 @@ +type: "nn" +layers { + name: "input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__print_0__" + type: "print" + active_type: "" + inputs { + input_layer_name: "input" + } +} +input_layer_names: "input" +output_layer_names: "input" +sub_models { + name: "root" + layer_names: "input" + layer_names: "__print_0__" + input_layer_names: "input" + output_layer_names: "input" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr new file mode 100644 index 0000000000000..dfb5ce20a31a0 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr @@ -0,0 +1,650 @@ +type: "recurrent_nn" +layers { + name: "seq_input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "sub_seq_input" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "label" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__mixed_0__" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "seq_input" + input_parameter_name: "___mixed_0__.w0" + proj_conf { + type: "fc" + name: "___mixed_0__.w0" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__mixed_1__" + type: "mixed" + size: 300 + active_type: "" + inputs { + input_layer_name: "seq_input" + input_parameter_name: "___mixed_1__.w0" + proj_conf { + type: "fc" + name: "___mixed_1__.w0" + input_size: 100 + output_size: 300 + } + } +} +layers { + name: "__recurrent_group_0__" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "seq_input@__recurrent_group_0__" + type: "scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_forward+delay1@__recurrent_group_0__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_forward@__recurrent_group_0__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "seq_input@__recurrent_group_0__" + input_parameter_name: "_rnn_forward@__recurrent_group_0__.w0" + } + inputs { + input_layer_name: 
"rnn_forward+delay1@__recurrent_group_0__" + input_parameter_name: "_rnn_forward@__recurrent_group_0__.w1" + } + bias_parameter_name: "_rnn_forward@__recurrent_group_0__.wbias" +} +layers { + name: "rnn_forward" + type: "gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__last_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_forward" + } + trans_type: "non-seq" +} +layers { + name: "__recurrent_group_1__" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "seq_input@__recurrent_group_1__" + type: "scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_back+delay1@__recurrent_group_1__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_back@__recurrent_group_1__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "seq_input@__recurrent_group_1__" + input_parameter_name: "_rnn_back@__recurrent_group_1__.w0" + } + inputs { + input_layer_name: "rnn_back+delay1@__recurrent_group_1__" + input_parameter_name: "_rnn_back@__recurrent_group_1__.w1" + } + bias_parameter_name: "_rnn_back@__recurrent_group_1__.wbias" +} +layers { + name: "rnn_back" + type: "gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__first_seq_0__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_back" + } + select_first: true + trans_type: "non-seq" +} +layers { + name: "__recurrent_group_2__" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "sub_seq_input@__recurrent_group_2__" + type: "sequence_scatter_agent" + size: 100 + active_type: "" +} +layers { + name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + type: "agent" + size: 200 + active_type: "" +} +layers { + name: "rnn_subseq_forward@__recurrent_group_2__" + type: "fc" + size: 200 + active_type: "tanh" + inputs { + input_layer_name: "sub_seq_input@__recurrent_group_2__" + input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w0" + } + inputs { + input_layer_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w1" + } + bias_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" +} +layers { + name: "rnn_subseq_forward" + type: "sequence_gather_agent" + size: 200 + active_type: "" +} +layers { + name: "__last_seq_1__" + type: "seqlastins" + size: 200 + active_type: "linear" + inputs { + input_layer_name: "rnn_subseq_forward" + } + trans_type: "non-seq" +} +layers { + name: "__lstm_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_0__@__lstm_group_0___recurrent_group" + type: "scatter_agent" + size: 400 + active_type: "" +} +layers { + name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + type: "mixed" + size: 400 + active_type: "" + inputs { + input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" + proj_conf { + type: "identity" + name: "___lstm_group_0___input_recurrent.w0" + input_size: 400 + output_size: 400 + } + } + inputs { + input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + input_parameter_name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" + 
proj_conf { + type: "fc" + name: "___lstm_group_0___input_recurrent.w1" + input_size: 100 + output_size: 400 + } + } +} +layers { + name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + type: "lstm_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + } + inputs { + input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + } + bias_parameter_name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" + active_gate_type: "sigmoid" + active_state_type: "sigmoid" +} +layers { + name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + type: "get_output" + size: 100 + active_type: "" + inputs { + input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + input_layer_argument: "state" + } +} +layers { + name: "__lstm_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_2__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__lstm_group_0__" + } + trans_type: "non-seq" +} +layers { + name: "__gru_group_0___recurrent_group" + type: "recurrent_layer_group" + active_type: "" +} +layers { + name: "__mixed_1__@__gru_group_0___recurrent_group" + type: "scatter_agent" + size: 300 + active_type: "" +} +layers { + name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + type: "agent" + size: 100 + active_type: "" +} +layers { + name: "__gru_group_0__@__gru_group_0___recurrent_group" + type: "gru_step" + size: 100 + active_type: "tanh" + inputs { + input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group" + input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" + } + inputs { + input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + } + bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" + active_gate_type: "sigmoid" +} +layers { + name: "__gru_group_0__" + type: "gather_agent" + size: 100 + active_type: "" +} +layers { + name: "__last_seq_3__" + type: "seqlastins" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "__gru_group_0__" + } + trans_type: "non-seq" +} +parameters { + name: "___mixed_0__.w0" + size: 40000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___mixed_1__.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 300 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_forward@__recurrent_group_0__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_rnn_back@__recurrent_group_1__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_back@__recurrent_group_1__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: 
"_rnn_back@__recurrent_group_1__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.w0" + size: 20000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.0707106813788 + dims: 200 + dims: 200 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" + size: 200 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 200 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" + size: 40000 + initial_mean: 0.0 + initial_std: 0.10000000149 + dims: 100 + dims: 400 + initial_strategy: 0 + initial_smart: true +} +parameters { + name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" + size: 30000 + initial_mean: 0.0 + initial_std: 0.00999999977648 + dims: 100 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" + size: 300 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 300 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "seq_input" +input_layer_names: "sub_seq_input" +output_layer_names: "__last_seq_0__" +output_layer_names: "__first_seq_0__" +output_layer_names: "__last_seq_1__" +output_layer_names: "__last_seq_2__" +output_layer_names: "__last_seq_3__" +sub_models { + name: "root" + layer_names: "seq_input" + layer_names: "sub_seq_input" + layer_names: "label" + layer_names: "__mixed_0__" + layer_names: "__mixed_1__" + layer_names: "__recurrent_group_0__" + layer_names: "rnn_forward" + layer_names: "__last_seq_0__" + layer_names: "__recurrent_group_1__" + layer_names: "rnn_back" + layer_names: "__first_seq_0__" + layer_names: "__recurrent_group_2__" + layer_names: "rnn_subseq_forward" + layer_names: "__last_seq_1__" + layer_names: "__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__" + layer_names: "__last_seq_2__" + layer_names: "__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__" + layer_names: "__last_seq_3__" + input_layer_names: "seq_input" + input_layer_names: "sub_seq_input" + output_layer_names: "__last_seq_0__" + output_layer_names: "__first_seq_0__" + output_layer_names: "__last_seq_1__" + output_layer_names: "__last_seq_2__" + output_layer_names: "__last_seq_3__" + is_recurrent_layer_group: false +} +sub_models { + name: "__recurrent_group_0__" + layer_names: "seq_input@__recurrent_group_0__" + layer_names: "rnn_forward+delay1@__recurrent_group_0__" + layer_names: "rnn_forward@__recurrent_group_0__" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "rnn_forward@__recurrent_group_0__" + link_name: "rnn_forward+delay1@__recurrent_group_0__" + is_sequence: false + } + in_links { + layer_name: "seq_input" + link_name: "seq_input@__recurrent_group_0__" + has_subseq: false + } + out_links { + layer_name: "rnn_forward@__recurrent_group_0__" + link_name: "rnn_forward" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: 
"__recurrent_group_1__" + layer_names: "seq_input@__recurrent_group_1__" + layer_names: "rnn_back+delay1@__recurrent_group_1__" + layer_names: "rnn_back@__recurrent_group_1__" + is_recurrent_layer_group: true + reversed: true + memories { + layer_name: "rnn_back@__recurrent_group_1__" + link_name: "rnn_back+delay1@__recurrent_group_1__" + is_sequence: false + } + in_links { + layer_name: "seq_input" + link_name: "seq_input@__recurrent_group_1__" + has_subseq: false + } + out_links { + layer_name: "rnn_back@__recurrent_group_1__" + link_name: "rnn_back" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__recurrent_group_2__" + layer_names: "sub_seq_input@__recurrent_group_2__" + layer_names: "rnn_subseq_forward+delay1@__recurrent_group_2__" + layer_names: "rnn_subseq_forward@__recurrent_group_2__" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "rnn_subseq_forward@__recurrent_group_2__" + link_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" + is_sequence: false + } + in_links { + layer_name: "sub_seq_input" + link_name: "sub_seq_input@__recurrent_group_2__" + has_subseq: true + } + out_links { + layer_name: "rnn_subseq_forward@__recurrent_group_2__" + link_name: "rnn_subseq_forward" + has_subseq: true + } + target_inlinkid: -1 +} +sub_models { + name: "__lstm_group_0___recurrent_group" + layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" + layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + memories { + layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_0__" + link_name: "__mixed_0__@__lstm_group_0___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" + link_name: "__lstm_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} +sub_models { + name: "__gru_group_0___recurrent_group" + layer_names: "__mixed_1__@__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + layer_names: "__gru_group_0__@__gru_group_0___recurrent_group" + is_recurrent_layer_group: true + reversed: false + memories { + layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" + link_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" + is_sequence: false + } + in_links { + layer_name: "__mixed_1__" + link_name: "__mixed_1__@__gru_group_0___recurrent_group" + has_subseq: false + } + out_links { + layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" + link_name: "__gru_group_0__" + has_subseq: false + } + target_inlinkid: -1 +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr new file mode 100644 index 0000000000000..1999c006d237e --- /dev/null +++ 
b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr @@ -0,0 +1,111 @@ +type: "nn" +layers { + name: "dat_in" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__seq_pooling_0__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + trans_type: "seq" +} +layers { + name: "__seq_pooling_1__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_2__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "average" + trans_type: "seq" +} +layers { + name: "__seq_pooling_3__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "average" + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_4__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "sum" + trans_type: "seq" +} +layers { + name: "__seq_pooling_5__" + type: "average" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + average_strategy: "sum" + trans_type: "non-seq" +} +layers { + name: "__seq_pooling_6__" + type: "max" + size: 100 + active_type: "linear" + inputs { + input_layer_name: "dat_in" + } + output_max_index: true + trans_type: "non-seq" +} +input_layer_names: "dat_in" +output_layer_names: "__seq_pooling_0__" +output_layer_names: "__seq_pooling_1__" +output_layer_names: "__seq_pooling_2__" +output_layer_names: "__seq_pooling_3__" +output_layer_names: "__seq_pooling_4__" +output_layer_names: "__seq_pooling_5__" +output_layer_names: "__seq_pooling_6__" +sub_models { + name: "root" + layer_names: "dat_in" + layer_names: "__seq_pooling_0__" + layer_names: "__seq_pooling_1__" + layer_names: "__seq_pooling_2__" + layer_names: "__seq_pooling_3__" + layer_names: "__seq_pooling_4__" + layer_names: "__seq_pooling_5__" + layer_names: "__seq_pooling_6__" + input_layer_names: "dat_in" + output_layer_names: "__seq_pooling_0__" + output_layer_names: "__seq_pooling_1__" + output_layer_names: "__seq_pooling_2__" + output_layer_names: "__seq_pooling_3__" + output_layer_names: "__seq_pooling_4__" + output_layer_names: "__seq_pooling_5__" + output_layer_names: "__seq_pooling_6__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr new file mode 100644 index 0000000000000..89ed28406e553 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr @@ -0,0 +1,27 @@ +type: "nn" +layers { + name: "probs" + type: "data" + size: 100 + active_type: "" +} +layers { + name: "__sampling_id_layer_0__" + type: "sampling_id" + size: 100 + active_type: "" + inputs { + input_layer_name: "probs" + } +} +input_layer_names: "probs" +output_layer_names: "__sampling_id_layer_0__" +sub_models { + name: "root" + layer_names: "probs" + layer_names: "__sampling_id_layer_0__" + input_layer_names: "probs" + output_layer_names: "__sampling_id_layer_0__" + is_recurrent_layer_group: false +} + diff --git a/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr new file mode 100644 index 0000000000000..d0ad388165007 --- /dev/null +++ 
b/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr @@ -0,0 +1,81 @@ +type: "nn" +layers { + name: "a" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "b" + type: "data" + size: 10 + active_type: "" +} +layers { + name: "__addto_0__" + type: "addto" + size: 10 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__concat_0__" + type: "concat" + size: 20 + active_type: "" + inputs { + input_layer_name: "a" + } + inputs { + input_layer_name: "b" + } +} +layers { + name: "__concat_1__" + type: "concat2" + size: 20 + active_type: "" + inputs { + input_layer_name: "a" + proj_conf { + type: "identity" + name: "___concat_1__.w0" + input_size: 10 + output_size: 10 + } + } + inputs { + input_layer_name: "b" + proj_conf { + type: "identity" + name: "___concat_1__.w1" + input_size: 10 + output_size: 10 + } + } +} +input_layer_names: "a" +input_layer_names: "b" +output_layer_names: "__addto_0__" +output_layer_names: "__concat_0__" +output_layer_names: "__concat_1__" +sub_models { + name: "root" + layer_names: "a" + layer_names: "b" + layer_names: "__addto_0__" + layer_names: "__concat_0__" + layer_names: "__concat_1__" + input_layer_names: "a" + input_layer_names: "b" + output_layer_names: "__addto_0__" + output_layer_names: "__concat_0__" + output_layer_names: "__concat_1__" + is_recurrent_layer_group: false +} + From 4dada9c7ce40bb424ebea43ac57396d65995644f Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 19:30:08 +0800 Subject: [PATCH 119/180] Delelte old protostr --- .../tests/configs/img_layers.protostr | 176 ----- .../tests/configs/last_first_seq.protostr | 69 -- .../tests/configs/layer_activations.protostr | 423 ------------ .../tests/configs/projections.protostr | 315 --------- .../tests/configs/shared_fc.protostr | 0 .../tests/configs/shared_lstm.protostr | 393 ----------- .../tests/configs/simple_rnn_layers.protostr | 418 ----------- .../configs/test_bilinear_interp.protostr | 125 ---- .../tests/configs/test_cost_layers.protostr | 289 -------- .../test_cost_layers_with_weight.protostr | 111 --- .../tests/configs/test_expand_layer.protostr | 56 -- .../tests/configs/test_fc.protostr | 98 --- .../configs/test_grumemory_layer.protostr | 51 -- .../tests/configs/test_hsigmoid.protostr | 62 -- .../configs/test_lstmemory_layer.protostr | 53 -- .../tests/configs/test_maxout.protostr | 0 .../tests/configs/test_ntm_layers.protostr | 225 ------ .../tests/configs/test_print_layer.protostr | 26 - .../tests/configs/test_rnn_group.protostr | 650 ------------------ .../configs/test_sequence_pooling.protostr | 111 --- .../tests/configs/unused_layers.protostr | 27 - .../tests/configs/util_layers.protostr | 81 --- 22 files changed, 3759 deletions(-) delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/projections.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr delete mode 
100644 python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr delete mode 100644 python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr deleted file mode 100644 index 899171ff1d00b..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/img_layers.protostr +++ /dev/null @@ -1,176 +0,0 @@ -type: "nn" -layers { - name: "image" - type: "data" - size: 65536 - active_type: "" -} -layers { - name: "__conv_0__" - type: "exconv" - size: 3297856 - active_type: "" - inputs { - input_layer_name: "image" - input_parameter_name: "___conv_0__.w0" - conv_conf { - filter_size: 32 - channels: 1 - stride: 1 - padding: 1 - groups: 1 - filter_channels: 1 - output_x: 227 - img_size: 256 - caffe_mode: true - filter_size_y: 32 - padding_y: 1 - stride_y: 1 - } - } - bias_parameter_name: "___conv_0__.wbias" - num_filters: 64 - shared_biases: true -} -layers { - name: "__batch_norm_0__" - type: "batch_norm" - size: 3297856 - active_type: "relu" - inputs { - input_layer_name: "__conv_0__" - input_parameter_name: "___batch_norm_0__.w0" - image_conf { - channels: 64 - img_size: 227 - } - } - inputs { - input_layer_name: "__conv_0__" - input_parameter_name: "___batch_norm_0__.w1" - } - inputs { - input_layer_name: "__conv_0__" - input_parameter_name: "___batch_norm_0__.w2" - } - bias_parameter_name: "___batch_norm_0__.wbias" - moving_average_fraction: 0.899999976158 -} -layers { - name: "__crmnorm_0__" - type: "norm" - size: 3297856 - active_type: "" - inputs { - input_layer_name: "__batch_norm_0__" - norm_conf { - norm_type: "cmrnorm-projection" - channels: 64 - size: 32 - scale: 0.000399999989895 - pow: 0.75 - output_x: 227 - img_size: 227 - blocked: false - } - } -} -layers { - name: "__pool_0__" - type: "pool" - size: 2458624 - active_type: "" - inputs { - input_layer_name: "__conv_0__" - pool_conf { - pool_type: "max-projection" - channels: 64 - size_x: 32 - stride: 1 - output_x: 196 - img_size: 227 - padding: 0 - size_y: 32 - stride_y: 1 - output_y: 196 - img_size_y: 227 - padding_y: 0 - } - } -} -parameters { - name: "___conv_0__.w0" - size: 65536 - initial_mean: 0.0 - initial_std: 0.0441941730678 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: 
"___conv_0__.wbias" - size: 64 - initial_mean: 0.0 - initial_std: 0.0 - dims: 64 - dims: 1 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___batch_norm_0__.w0" - size: 64 - initial_mean: 1.0 - initial_std: 0.0 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___batch_norm_0__.w1" - size: 64 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 64 - initial_strategy: 0 - initial_smart: false - is_static: true - is_shared: true -} -parameters { - name: "___batch_norm_0__.w2" - size: 64 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 64 - initial_strategy: 0 - initial_smart: false - is_static: true - is_shared: true -} -parameters { - name: "___batch_norm_0__.wbias" - size: 64 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 64 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "image" -output_layer_names: "__pool_0__" -output_layer_names: "__crmnorm_0__" -sub_models { - name: "root" - layer_names: "image" - layer_names: "__conv_0__" - layer_names: "__batch_norm_0__" - layer_names: "__crmnorm_0__" - layer_names: "__pool_0__" - input_layer_names: "image" - output_layer_names: "__pool_0__" - output_layer_names: "__crmnorm_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr b/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr deleted file mode 100644 index 7b2911f8e367e..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/last_first_seq.protostr +++ /dev/null @@ -1,69 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 30 - active_type: "" -} -layers { - name: "__first_seq_0__" - type: "seqlastins" - size: 30 - active_type: "linear" - inputs { - input_layer_name: "data" - } - select_first: true - trans_type: "seq" -} -layers { - name: "__first_seq_1__" - type: "seqlastins" - size: 30 - active_type: "linear" - inputs { - input_layer_name: "data" - } - select_first: true - trans_type: "non-seq" -} -layers { - name: "__last_seq_0__" - type: "seqlastins" - size: 30 - active_type: "linear" - inputs { - input_layer_name: "data" - } - trans_type: "seq" -} -layers { - name: "__last_seq_1__" - type: "seqlastins" - size: 30 - active_type: "linear" - inputs { - input_layer_name: "data" - } - trans_type: "non-seq" -} -input_layer_names: "data" -output_layer_names: "__first_seq_0__" -output_layer_names: "__first_seq_1__" -output_layer_names: "__last_seq_0__" -output_layer_names: "__last_seq_1__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__first_seq_0__" - layer_names: "__first_seq_1__" - layer_names: "__last_seq_0__" - layer_names: "__last_seq_1__" - input_layer_names: "data" - output_layer_names: "__first_seq_0__" - output_layer_names: "__first_seq_1__" - output_layer_names: "__last_seq_0__" - output_layer_names: "__last_seq_1__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr b/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr deleted file mode 100644 index 8ae2421727efe..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/layer_activations.protostr +++ /dev/null @@ -1,423 +0,0 @@ -type: "nn" -layers { - name: "input" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "layer_0" - type: "fc" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_0.w0" - } - 
bias_parameter_name: "_layer_0.wbias" -} -layers { - name: "layer_1" - type: "fc" - size: 100 - active_type: "sigmoid" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_1.w0" - } - bias_parameter_name: "_layer_1.wbias" -} -layers { - name: "layer_2" - type: "fc" - size: 100 - active_type: "softmax" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_2.w0" - } - bias_parameter_name: "_layer_2.wbias" -} -layers { - name: "layer_3" - type: "fc" - size: 100 - active_type: "" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_3.w0" - } - bias_parameter_name: "_layer_3.wbias" -} -layers { - name: "layer_4" - type: "fc" - size: 100 - active_type: "" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_4.w0" - } - bias_parameter_name: "_layer_4.wbias" -} -layers { - name: "layer_5" - type: "fc" - size: 100 - active_type: "exponential" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_5.w0" - } - bias_parameter_name: "_layer_5.wbias" -} -layers { - name: "layer_6" - type: "fc" - size: 100 - active_type: "relu" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_6.w0" - } - bias_parameter_name: "_layer_6.wbias" -} -layers { - name: "layer_7" - type: "fc" - size: 100 - active_type: "brelu" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_7.w0" - } - bias_parameter_name: "_layer_7.wbias" -} -layers { - name: "layer_8" - type: "fc" - size: 100 - active_type: "softrelu" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_8.w0" - } - bias_parameter_name: "_layer_8.wbias" -} -layers { - name: "layer_9" - type: "fc" - size: 100 - active_type: "stanh" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_9.w0" - } - bias_parameter_name: "_layer_9.wbias" -} -layers { - name: "layer_10" - type: "fc" - size: 100 - active_type: "abs" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_10.w0" - } - bias_parameter_name: "_layer_10.wbias" -} -layers { - name: "layer_11" - type: "fc" - size: 100 - active_type: "square" - inputs { - input_layer_name: "input" - input_parameter_name: "_layer_11.w0" - } - bias_parameter_name: "_layer_11.wbias" -} -parameters { - name: "_layer_0.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_0.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_1.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_1.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_2.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_2.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_3.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_3.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - 
initial_smart: false -} -parameters { - name: "_layer_4.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_4.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_5.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_5.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_6.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_6.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_7.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_7.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_8.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_8.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_9.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_9.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_10.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_10.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_layer_11.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_layer_11.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "input" -output_layer_names: "layer_0" -output_layer_names: "layer_1" -output_layer_names: "layer_2" -output_layer_names: "layer_3" -output_layer_names: "layer_4" -output_layer_names: "layer_5" -output_layer_names: "layer_6" -output_layer_names: "layer_7" -output_layer_names: "layer_8" -output_layer_names: "layer_9" -output_layer_names: "layer_10" -output_layer_names: "layer_11" -sub_models { - name: "root" - layer_names: "input" - layer_names: "layer_0" - layer_names: "layer_1" - layer_names: "layer_2" - layer_names: "layer_3" - layer_names: "layer_4" - layer_names: "layer_5" - layer_names: "layer_6" - layer_names: "layer_7" - layer_names: "layer_8" - layer_names: "layer_9" - layer_names: "layer_10" - layer_names: "layer_11" - input_layer_names: "input" - output_layer_names: "layer_0" - output_layer_names: "layer_1" - output_layer_names: "layer_2" - 
output_layer_names: "layer_3" - output_layer_names: "layer_4" - output_layer_names: "layer_5" - output_layer_names: "layer_6" - output_layer_names: "layer_7" - output_layer_names: "layer_8" - output_layer_names: "layer_9" - output_layer_names: "layer_10" - output_layer_names: "layer_11" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.protostr b/python/paddle/trainer_config_helpers/tests/configs/projections.protostr deleted file mode 100644 index a901af6b42431..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/projections.protostr +++ /dev/null @@ -1,315 +0,0 @@ -type: "nn" -layers { - name: "test" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__embedding_0__" - type: "mixed" - size: 256 - active_type: "" - inputs { - input_layer_name: "test" - input_parameter_name: "___embedding_0__.w0" - proj_conf { - type: "table" - name: "___embedding_0__.w0" - input_size: 100 - output_size: 256 - } - } -} -layers { - name: "__mixed_0__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__embedding_0__" - input_parameter_name: "___mixed_0__.w0" - proj_conf { - type: "fc" - name: "___mixed_0__.w0" - input_size: 256 - output_size: 100 - } - } -} -layers { - name: "__mixed_1__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__mixed_0__" - input_parameter_name: "___mixed_1__.w0" - proj_conf { - type: "table" - name: "___mixed_1__.w0" - input_size: 100 - output_size: 100 - } - } -} -layers { - name: "__mixed_2__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__mixed_1__" - proj_conf { - type: "identity" - name: "___mixed_2__.w0" - input_size: 100 - output_size: 100 - } - } -} -layers { - name: "__mixed_3__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__mixed_2__" - input_parameter_name: "___mixed_3__.w0" - proj_conf { - type: "dot_mul" - name: "___mixed_3__.w0" - input_size: 100 - output_size: 100 - } - } -} -layers { - name: "__mixed_4__" - type: "mixed" - size: 300 - active_type: "" - inputs { - input_layer_name: "__mixed_3__" - input_parameter_name: "___mixed_4__.w0" - proj_conf { - type: "context" - name: "___mixed_4__.w0" - input_size: 100 - output_size: 300 - context_start: -1 - context_length: 3 - trainable_padding: true - } - } -} -layers { - name: "__mixed_5__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__mixed_2__" - } - inputs { - input_layer_name: "__mixed_3__" - } - operator_confs { - type: "dot_mul" - input_indices: 0 - input_indices: 1 - input_sizes: 100 - input_sizes: 100 - output_size: 100 - dotmul_scale: 1.0 - } -} -layers { - name: "img" - type: "data" - size: 1024 - active_type: "" -} -layers { - name: "filter" - type: "data" - size: 576 - active_type: "" -} -layers { - name: "__mixed_6__" - type: "mixed" - size: 57600 - active_type: "" - inputs { - input_layer_name: "img" - } - inputs { - input_layer_name: "filter" - } - operator_confs { - type: "conv" - input_indices: 0 - input_indices: 1 - input_sizes: 1024 - input_sizes: 576 - output_size: 57600 - conv_conf { - filter_size: 3 - channels: 1 - stride: 1 - padding: 0 - groups: 1 - filter_channels: 1 - output_x: 30 - img_size: 32 - caffe_mode: true - filter_size_y: 3 - padding_y: 0 - stride_y: 1 - } - num_filters: 64 - } -} -layers { - name: "__mixed_7__" - type: "mixed" - size: 100 - active_type: "" - inputs { - input_layer_name: "__mixed_4__" - input_parameter_name: 
"___mixed_7__.w0" - proj_conf { - type: "fc" - name: "___mixed_7__.w0" - input_size: 300 - output_size: 100 - } - } - inputs { - input_layer_name: "__mixed_5__" - input_parameter_name: "___mixed_7__.w1" - proj_conf { - type: "trans_fc" - name: "___mixed_7__.w1" - input_size: 100 - output_size: 100 - } - } - inputs { - input_layer_name: "__mixed_6__" - input_parameter_name: "___mixed_7__.w2" - proj_conf { - type: "fc" - name: "___mixed_7__.w2" - input_size: 57600 - output_size: 100 - } - } - drop_rate: 0.5 -} -parameters { - name: "___embedding_0__.w0" - size: 25600 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 256 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_0__.w0" - size: 25600 - initial_mean: 0.0 - initial_std: 0.0625 - dims: 256 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_1__.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_3__.w0" - size: 100 - initial_mean: 0.0 - initial_std: 1.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_4__.w0" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 2 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___mixed_7__.w0" - size: 30000 - initial_mean: 0.0 - initial_std: 0.0577350258827 - dims: 300 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_7__.w1" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_7__.w2" - size: 5760000 - initial_mean: 0.0 - initial_std: 0.00416666688398 - dims: 57600 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -input_layer_names: "test" -input_layer_names: "img" -input_layer_names: "filter" -output_layer_names: "__mixed_7__" -sub_models { - name: "root" - layer_names: "test" - layer_names: "__embedding_0__" - layer_names: "__mixed_0__" - layer_names: "__mixed_1__" - layer_names: "__mixed_2__" - layer_names: "__mixed_3__" - layer_names: "__mixed_4__" - layer_names: "__mixed_5__" - layer_names: "img" - layer_names: "filter" - layer_names: "__mixed_6__" - layer_names: "__mixed_7__" - input_layer_names: "test" - input_layer_names: "img" - input_layer_names: "filter" - output_layer_names: "__mixed_7__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/shared_fc.protostr deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr b/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr deleted file mode 100644 index 26eed43a459f5..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/shared_lstm.protostr +++ /dev/null @@ -1,393 +0,0 @@ -type: "recurrent_nn" -layers { - name: "data_a" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "data_b" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__mixed_0__" - type: "mixed" - size: 400 - active_type: "" - inputs { - input_layer_name: "data_a" - input_parameter_name: "mixed_param" - proj_conf { - type: "fc" - name: "___mixed_0__.w0" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__mixed_1__" - type: "mixed" - size: 
400 - active_type: "" - inputs { - input_layer_name: "data_b" - input_parameter_name: "mixed_param" - proj_conf { - type: "fc" - name: "___mixed_1__.w0" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__lstm_group_0___recurrent_group" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "__mixed_0__@__lstm_group_0___recurrent_group" - type: "scatter_agent" - size: 400 - active_type: "" -} -layers { - name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - type: "mixed" - size: 400 - active_type: "" - inputs { - input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" - proj_conf { - type: "identity" - name: "___lstm_group_0___input_recurrent.w0" - input_size: 400 - output_size: 400 - } - } - inputs { - input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - input_parameter_name: "lstm_param" - proj_conf { - type: "fc" - name: "___lstm_group_0___input_recurrent.w1" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - type: "lstm_step" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - } - inputs { - input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - } - bias_parameter_name: "lstm_bias" - active_gate_type: "sigmoid" - active_state_type: "sigmoid" -} -layers { - name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - type: "get_output" - size: 100 - active_type: "" - inputs { - input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - input_layer_argument: "state" - } -} -layers { - name: "__lstm_group_0__" - type: "gather_agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_1___recurrent_group" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "__mixed_1__@__lstm_group_1___recurrent_group" - type: "scatter_agent" - size: 400 - active_type: "" -} -layers { - name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" - type: "mixed" - size: 400 - active_type: "" - inputs { - input_layer_name: "__mixed_1__@__lstm_group_1___recurrent_group" - proj_conf { - type: "identity" - name: "___lstm_group_1___input_recurrent.w0" - input_size: 400 - output_size: 400 - } - } - inputs { - input_layer_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" - input_parameter_name: "lstm_param" - proj_conf { - type: "fc" - name: "___lstm_group_1___input_recurrent.w1" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__lstm_group_1__@__lstm_group_1___recurrent_group" - type: "lstm_step" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" - } - inputs { - input_layer_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" - } - bias_parameter_name: "lstm_bias" - active_gate_type: "sigmoid" - active_state_type: "sigmoid" -} -layers { - name: 
"__lstm_group_1___state@__lstm_group_1___recurrent_group" - type: "get_output" - size: 100 - active_type: "" - inputs { - input_layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" - input_layer_argument: "state" - } -} -layers { - name: "__lstm_group_1__" - type: "gather_agent" - size: 100 - active_type: "" -} -layers { - name: "__last_seq_0__" - type: "seqlastins" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "__lstm_group_0__" - } - trans_type: "non-seq" -} -layers { - name: "__last_seq_1__" - type: "seqlastins" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "__lstm_group_1__" - } - trans_type: "non-seq" -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 10 - active_type: "softmax" - inputs { - input_layer_name: "__last_seq_0__" - input_parameter_name: "softmax_param" - } - inputs { - input_layer_name: "__last_seq_1__" - input_parameter_name: "softmax_param" - } -} -layers { - name: "label" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "__cost_0__" - type: "multi-class-cross-entropy" - size: 1 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - } - inputs { - input_layer_name: "label" - } - coeff: 1.0 -} -parameters { - name: "mixed_param" - size: 40000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 400 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "lstm_param" - size: 40000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 400 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "lstm_bias" - size: 300 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 300 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "softmax_param" - size: 1000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 10 - initial_strategy: 0 - initial_smart: true -} -input_layer_names: "data_a" -input_layer_names: "data_b" -input_layer_names: "label" -output_layer_names: "__cost_0__" -evaluators { - name: "classification_error_evaluator" - type: "classification_error" - input_layers: "__fc_layer_0__" - input_layers: "label" -} -sub_models { - name: "root" - layer_names: "data_a" - layer_names: "data_b" - layer_names: "__mixed_0__" - layer_names: "__mixed_1__" - layer_names: "__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__" - layer_names: "__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1__" - layer_names: "__last_seq_0__" - layer_names: "__last_seq_1__" - layer_names: "__fc_layer_0__" - layer_names: "label" - layer_names: "__cost_0__" - input_layer_names: "data_a" - input_layer_names: "data_b" - input_layer_names: "label" - output_layer_names: "__cost_0__" - evaluator_names: "classification_error_evaluator" - is_recurrent_layer_group: false -} -sub_models { - name: "__lstm_group_0___recurrent_group" - layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - is_recurrent_layer_group: true - reversed: false - memories { - layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - is_sequence: false - } - memories { - 
layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - is_sequence: false - } - in_links { - layer_name: "__mixed_0__" - link_name: "__mixed_0__@__lstm_group_0___recurrent_group" - has_subseq: false - } - out_links { - layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0__" - has_subseq: false - } - target_inlinkid: -1 -} -sub_models { - name: "__lstm_group_1___recurrent_group" - layer_names: "__mixed_1__@__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1___input_recurrent@__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1__@__lstm_group_1___recurrent_group" - layer_names: "__lstm_group_1___state@__lstm_group_1___recurrent_group" - is_recurrent_layer_group: true - reversed: false - memories { - layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" - link_name: "__lstm_group_1__+delay1@__lstm_group_1___recurrent_group" - is_sequence: false - } - memories { - layer_name: "__lstm_group_1___state@__lstm_group_1___recurrent_group" - link_name: "__lstm_group_1___state+delay1@__lstm_group_1___recurrent_group" - is_sequence: false - } - in_links { - layer_name: "__mixed_1__" - link_name: "__mixed_1__@__lstm_group_1___recurrent_group" - has_subseq: false - } - out_links { - layer_name: "__lstm_group_1__@__lstm_group_1___recurrent_group" - link_name: "__lstm_group_1__" - has_subseq: false - } - target_inlinkid: -1 -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr deleted file mode 100644 index 57445243bd06f..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/simple_rnn_layers.protostr +++ /dev/null @@ -1,418 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 200 - active_type: "" -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "data" - input_parameter_name: "___fc_layer_0__.w0" - } - bias_parameter_name: "___fc_layer_0__.wbias" -} -layers { - name: "__recurrent_layer_0__" - type: "recurrent" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___recurrent_layer_0__.w0" - } - bias_parameter_name: "___recurrent_layer_0__.wbias" - reversed: false -} -layers { - name: "__recurrent_layer_1__" - type: "recurrent" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___recurrent_layer_1__.w0" - } - bias_parameter_name: "___recurrent_layer_1__.wbias" - reversed: true -} -layers { - name: "__fc_layer_1__" - type: "fc" - size: 800 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___fc_layer_1__.w0" - } -} -layers { - name: "__lstmemory_0__" - type: "lstmemory" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_1__" - input_parameter_name: "___lstmemory_0__.w0" - } - bias_parameter_name: "___lstmemory_0__.wbias" - reversed: false - active_gate_type: "sigmoid" - active_state_type: "tanh" -} -layers { - name: "__fc_layer_2__" - type: "fc" - size: 800 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___fc_layer_2__.w0" - } -} -layers { - 
name: "__lstmemory_1__" - type: "lstmemory" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_2__" - input_parameter_name: "___lstmemory_1__.w0" - } - bias_parameter_name: "___lstmemory_1__.wbias" - reversed: true - active_gate_type: "sigmoid" - active_state_type: "tanh" -} -layers { - name: "__fc_layer_3__" - type: "fc" - size: 600 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___fc_layer_3__.w0" - } -} -layers { - name: "__gru_0__" - type: "gated_recurrent" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_3__" - input_parameter_name: "___gru_0__.w0" - } - bias_parameter_name: "___gru_0__.wbias" - reversed: false - active_gate_type: "sigmoid" -} -layers { - name: "__fc_layer_4__" - type: "fc" - size: 600 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___fc_layer_4__.w0" - } -} -layers { - name: "__gru_1__" - type: "gated_recurrent" - size: 200 - active_type: "sigmoid" - inputs { - input_layer_name: "__fc_layer_4__" - input_parameter_name: "___gru_1__.w0" - } - bias_parameter_name: "___gru_1__.wbias" - reversed: true - active_gate_type: "sigmoid" -} -layers { - name: "__last_seq_0__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__recurrent_layer_0__" - } - trans_type: "non-seq" -} -layers { - name: "__first_seq_0__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__recurrent_layer_1__" - } - select_first: true - trans_type: "non-seq" -} -layers { - name: "__last_seq_1__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__lstmemory_0__" - } - trans_type: "non-seq" -} -layers { - name: "__first_seq_1__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__lstmemory_1__" - } - select_first: true - trans_type: "non-seq" -} -layers { - name: "__last_seq_2__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__gru_0__" - } - trans_type: "non-seq" -} -layers { - name: "__first_seq_2__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "__gru_1__" - } - select_first: true - trans_type: "non-seq" -} -parameters { - name: "___fc_layer_0__.w0" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___fc_layer_0__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___recurrent_layer_0__.w0" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___recurrent_layer_0__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___recurrent_layer_1__.w0" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___recurrent_layer_1__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___fc_layer_1__.w0" - size: 160000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 800 - initial_strategy: 0 - initial_smart: 
true -} -parameters { - name: "___lstmemory_0__.w0" - size: 160000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - dims: 4 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___lstmemory_0__.wbias" - size: 1400 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 1400 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___fc_layer_2__.w0" - size: 160000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 800 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___lstmemory_1__.w0" - size: 160000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - dims: 4 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___lstmemory_1__.wbias" - size: 1400 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 1400 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___fc_layer_3__.w0" - size: 120000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 600 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___gru_0__.w0" - size: 120000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 600 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___gru_0__.wbias" - size: 600 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 600 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___fc_layer_4__.w0" - size: 120000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 600 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___gru_1__.w0" - size: 120000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 600 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___gru_1__.wbias" - size: 600 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 600 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "data" -output_layer_names: "__last_seq_0__" -output_layer_names: "__first_seq_0__" -output_layer_names: "__last_seq_1__" -output_layer_names: "__first_seq_1__" -output_layer_names: "__last_seq_2__" -output_layer_names: "__first_seq_2__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__fc_layer_0__" - layer_names: "__recurrent_layer_0__" - layer_names: "__recurrent_layer_1__" - layer_names: "__fc_layer_1__" - layer_names: "__lstmemory_0__" - layer_names: "__fc_layer_2__" - layer_names: "__lstmemory_1__" - layer_names: "__fc_layer_3__" - layer_names: "__gru_0__" - layer_names: "__fc_layer_4__" - layer_names: "__gru_1__" - layer_names: "__last_seq_0__" - layer_names: "__first_seq_0__" - layer_names: "__last_seq_1__" - layer_names: "__first_seq_1__" - layer_names: "__last_seq_2__" - layer_names: "__first_seq_2__" - input_layer_names: "data" - output_layer_names: "__last_seq_0__" - output_layer_names: "__first_seq_0__" - output_layer_names: "__last_seq_1__" - output_layer_names: "__first_seq_1__" - output_layer_names: "__last_seq_2__" - output_layer_names: "__first_seq_2__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr deleted file mode 100644 index 278088d4abd50..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_bilinear_interp.protostr +++ /dev/null @@ -1,125 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 2304 - active_type: "" -} 
-layers { - name: "__conv_0__" - type: "exconv" - size: 36864 - active_type: "" - inputs { - input_layer_name: "data" - input_parameter_name: "___conv_0__.w0" - conv_conf { - filter_size: 3 - channels: 1 - stride: 1 - padding: 1 - groups: 1 - filter_channels: 1 - output_x: 48 - img_size: 48 - caffe_mode: true - filter_size_y: 3 - padding_y: 1 - stride_y: 1 - } - } - bias_parameter_name: "___conv_0__.wbias" - num_filters: 16 - shared_biases: true -} -layers { - name: "__bilinear_interp_layer_0__" - type: "bilinear_interp" - size: 36864 - active_type: "" - inputs { - input_layer_name: "__conv_0__" - bilinear_interp_conf { - img_size_x: 32 - img_size_y: 32 - out_size_x: 64 - out_size_y: 64 - num_channels: 16 - } - } -} -layers { - name: "__pool_0__" - type: "pool" - size: 9216 - active_type: "" - inputs { - input_layer_name: "__bilinear_interp_layer_0__" - pool_conf { - pool_type: "max-projection" - channels: 4 - size_x: 2 - stride: 2 - output_x: 48 - img_size: 96 - padding: 0 - size_y: 2 - stride_y: 2 - output_y: 48 - img_size_y: 96 - padding_y: 0 - } - } -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 384 - active_type: "tanh" - inputs { - input_layer_name: "__pool_0__" - input_parameter_name: "___fc_layer_0__.w0" - } -} -parameters { - name: "___conv_0__.w0" - size: 144 - initial_mean: 0.0 - initial_std: 0.471404522657 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___conv_0__.wbias" - size: 16 - initial_mean: 0.0 - initial_std: 0.0 - dims: 16 - dims: 1 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___fc_layer_0__.w0" - size: 3538944 - initial_mean: 0.0 - initial_std: 0.0104166669771 - dims: 9216 - dims: 384 - initial_strategy: 0 - initial_smart: true -} -input_layer_names: "data" -output_layer_names: "__fc_layer_0__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__conv_0__" - layer_names: "__bilinear_interp_layer_0__" - layer_names: "__pool_0__" - layer_names: "__fc_layer_0__" - input_layer_names: "data" - output_layer_names: "__fc_layer_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr deleted file mode 100644 index c37586f4068e4..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.protostr +++ /dev/null @@ -1,289 +0,0 @@ -type: "nn" -layers { - name: "input" - type: "data" - size: 200 - active_type: "" -} -layers { - name: "labels" - type: "data" - size: 5000 - active_type: "" -} -layers { - name: "probs" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "xe-label" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "__ctc_layer_0__" - type: "ctc" - size: 5001 - active_type: "" - inputs { - input_layer_name: "input" - } - inputs { - input_layer_name: "labels" - } - norm_by_times: false -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 4 - active_type: "tanh" - inputs { - input_layer_name: "input" - input_parameter_name: "___fc_layer_0__.w0" - } - bias_parameter_name: "___fc_layer_0__.wbias" -} -layers { - name: "crf_label" - type: "data" - size: 4 - active_type: "" -} -layers { - name: "__crf_layer_0__" - type: "crf" - size: 4 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - input_parameter_name: "___crf_layer_0__.w0" - } - inputs { - input_layer_name: "crf_label" - } - coeff: 1.0 -} -layers { - name: "left" - type: "data" - size: 1 - active_type: "" -} 
-layers { - name: "right" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "label" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "__rank_cost_0__" - type: "rank-cost" - size: 1 - active_type: "" - inputs { - input_layer_name: "left" - } - inputs { - input_layer_name: "right" - } - inputs { - input_layer_name: "label" - } - coeff: 1.0 -} -layers { - name: "list_feature" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "list_scores" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "__lambda_cost_0__" - type: "lambda_cost" - size: 1 - active_type: "" - inputs { - input_layer_name: "list_feature" - } - inputs { - input_layer_name: "list_scores" - } - NDCG_num: 5 - max_sort_size: -1 -} -layers { - name: "__cross_entropy_0__" - type: "multi-class-cross-entropy" - size: 1 - active_type: "" - inputs { - input_layer_name: "probs" - } - inputs { - input_layer_name: "xe-label" - } - coeff: 1.0 -} -layers { - name: "__cross_entropy_with_selfnorm_0__" - type: "multi_class_cross_entropy_with_selfnorm" - active_type: "" - inputs { - input_layer_name: "probs" - } - inputs { - input_layer_name: "xe-label" - } - softmax_selfnorm_alpha: 0.10000000149 - coeff: 1.0 -} -layers { - name: "huber_probs" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "huber_label" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "__huber_cost_0__" - type: "huber" - size: 1 - active_type: "" - inputs { - input_layer_name: "huber_probs" - } - inputs { - input_layer_name: "huber_label" - } - coeff: 1.0 -} -layers { - name: "__multi_binary_label_cross_entropy_0__" - type: "multi_binary_label_cross_entropy" - size: 1 - active_type: "" - inputs { - input_layer_name: "probs" - } - inputs { - input_layer_name: "xe-label" - } - coeff: 1.0 -} -parameters { - name: "___fc_layer_0__.w0" - size: 800 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 4 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___fc_layer_0__.wbias" - size: 4 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 4 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___crf_layer_0__.w0" - size: 24 - initial_mean: 0.0 - initial_std: 0.5 - dims: 4 - dims: 6 - initial_strategy: 0 - initial_smart: true -} -input_layer_names: "input" -input_layer_names: "labels" -input_layer_names: "crf_label" -input_layer_names: "left" -input_layer_names: "right" -input_layer_names: "label" -input_layer_names: "list_feature" -input_layer_names: "list_scores" -input_layer_names: "probs" -input_layer_names: "xe-label" -input_layer_names: "huber_probs" -input_layer_names: "huber_label" -output_layer_names: "__ctc_layer_0__" -output_layer_names: "__crf_layer_0__" -output_layer_names: "__rank_cost_0__" -output_layer_names: "__lambda_cost_0__" -output_layer_names: "__cross_entropy_0__" -output_layer_names: "__cross_entropy_with_selfnorm_0__" -output_layer_names: "__huber_cost_0__" -output_layer_names: "__multi_binary_label_cross_entropy_0__" -sub_models { - name: "root" - layer_names: "input" - layer_names: "labels" - layer_names: "probs" - layer_names: "xe-label" - layer_names: "__ctc_layer_0__" - layer_names: "__fc_layer_0__" - layer_names: "crf_label" - layer_names: "__crf_layer_0__" - layer_names: "left" - layer_names: "right" - layer_names: "label" - layer_names: "__rank_cost_0__" - layer_names: "list_feature" - layer_names: "list_scores" - layer_names: "__lambda_cost_0__" - layer_names: "__cross_entropy_0__" - layer_names: 
"__cross_entropy_with_selfnorm_0__" - layer_names: "huber_probs" - layer_names: "huber_label" - layer_names: "__huber_cost_0__" - layer_names: "__multi_binary_label_cross_entropy_0__" - input_layer_names: "input" - input_layer_names: "labels" - input_layer_names: "crf_label" - input_layer_names: "left" - input_layer_names: "right" - input_layer_names: "label" - input_layer_names: "list_feature" - input_layer_names: "list_scores" - input_layer_names: "probs" - input_layer_names: "xe-label" - input_layer_names: "huber_probs" - input_layer_names: "huber_label" - output_layer_names: "__ctc_layer_0__" - output_layer_names: "__crf_layer_0__" - output_layer_names: "__rank_cost_0__" - output_layer_names: "__lambda_cost_0__" - output_layer_names: "__cross_entropy_0__" - output_layer_names: "__cross_entropy_with_selfnorm_0__" - output_layer_names: "__huber_cost_0__" - output_layer_names: "__multi_binary_label_cross_entropy_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr deleted file mode 100644 index de58f5c64969b..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers_with_weight.protostr +++ /dev/null @@ -1,111 +0,0 @@ -type: "nn" -layers { - name: "input" - type: "data" - size: 300 - active_type: "" -} -layers { - name: "label" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "weight" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 10 - active_type: "softmax" - inputs { - input_layer_name: "input" - input_parameter_name: "___fc_layer_0__.w0" - } - bias_parameter_name: "___fc_layer_0__.wbias" -} -layers { - name: "__cost_0__" - type: "multi-class-cross-entropy" - size: 1 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - } - inputs { - input_layer_name: "label" - } - inputs { - input_layer_name: "weight" - } - coeff: 1.0 -} -layers { - name: "__regression_cost_0__" - type: "square_error" - size: 1 - active_type: "" - inputs { - input_layer_name: "__fc_layer_0__" - } - inputs { - input_layer_name: "label" - } - inputs { - input_layer_name: "weight" - } - coeff: 1.0 -} -parameters { - name: "___fc_layer_0__.w0" - size: 3000 - initial_mean: 0.0 - initial_std: 0.0577350258827 - dims: 300 - dims: 10 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___fc_layer_0__.wbias" - size: 10 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 10 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "input" -input_layer_names: "label" -input_layer_names: "weight" -output_layer_names: "__cost_0__" -output_layer_names: "__regression_cost_0__" -evaluators { - name: "classification_error_evaluator" - type: "classification_error" - input_layers: "__fc_layer_0__" - input_layers: "label" - input_layers: "weight" -} -sub_models { - name: "root" - layer_names: "input" - layer_names: "label" - layer_names: "weight" - layer_names: "__fc_layer_0__" - layer_names: "__cost_0__" - layer_names: "__regression_cost_0__" - input_layer_names: "input" - input_layer_names: "label" - input_layer_names: "weight" - output_layer_names: "__cost_0__" - output_layer_names: "__regression_cost_0__" - evaluator_names: "classification_error_evaluator" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr 
b/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr deleted file mode 100644 index f4b36052264bc..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_expand_layer.protostr +++ /dev/null @@ -1,56 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 30 - active_type: "" -} -layers { - name: "data_seq" - type: "data" - size: 30 - active_type: "" -} -layers { - name: "__expand_layer_0__" - type: "expand" - size: 30 - active_type: "" - inputs { - input_layer_name: "data" - } - inputs { - input_layer_name: "data_seq" - } - trans_type: "seq" -} -layers { - name: "__expand_layer_1__" - type: "expand" - size: 30 - active_type: "" - inputs { - input_layer_name: "data" - } - inputs { - input_layer_name: "data_seq" - } - trans_type: "non-seq" -} -input_layer_names: "data" -input_layer_names: "data_seq" -output_layer_names: "__expand_layer_0__" -output_layer_names: "__expand_layer_1__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "data_seq" - layer_names: "__expand_layer_0__" - layer_names: "__expand_layer_1__" - input_layer_names: "data" - input_layer_names: "data_seq" - output_layer_names: "__expand_layer_0__" - output_layer_names: "__expand_layer_1__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr deleted file mode 100644 index 80b01246ba96f..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_fc.protostr +++ /dev/null @@ -1,98 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__trans_layer_0__" - type: "trans" - size: 100 - active_type: "" - inputs { - input_layer_name: "data" - } -} -layers { - name: "__fc_layer_0__" - type: "fc" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "__trans_layer_0__" - input_parameter_name: "___fc_layer_0__.w0" - } -} -layers { - name: "mask" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__selective_fc_layer_0__" - type: "selective_fc" - size: 100 - active_type: "sigmoid" - inputs { - input_layer_name: "data" - input_parameter_name: "___selective_fc_layer_0__.w0" - } - inputs { - input_layer_name: "mask" - } - bias_parameter_name: "___selective_fc_layer_0__.wbias" - selective_fc_pass_generation: false - has_selected_colums: true - selective_fc_full_mul_ratio: 0.019999999553 -} -parameters { - name: "___fc_layer_0__.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___selective_fc_layer_0__.w0" - size: 10000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - initial_strategy: 0 - initial_smart: true - is_sparse: false -} -parameters { - name: "___selective_fc_layer_0__.wbias" - size: 100 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 100 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "data" -input_layer_names: "mask" -output_layer_names: "__fc_layer_0__" -output_layer_names: "__selective_fc_layer_0__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__trans_layer_0__" - layer_names: "__fc_layer_0__" - layer_names: "mask" - layer_names: "__selective_fc_layer_0__" - input_layer_names: "data" - input_layer_names: "mask" - output_layer_names: "__fc_layer_0__" - output_layer_names: "__selective_fc_layer_0__" - is_recurrent_layer_group: false -} 
- diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr deleted file mode 100644 index 81577910ccf34..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_grumemory_layer.protostr +++ /dev/null @@ -1,51 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 120 - active_type: "" -} -layers { - name: "__gru_0__" - type: "gated_recurrent" - size: 40 - active_type: "sigmoid" - inputs { - input_layer_name: "data" - input_parameter_name: "___gru_0__.w0" - } - bias_parameter_name: "___gru_0__.wbias" - reversed: true - active_gate_type: "tanh" -} -parameters { - name: "___gru_0__.w0" - size: 4800 - initial_mean: 0.0 - initial_std: 0.158113881946 - dims: 40 - dims: 120 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___gru_0__.wbias" - size: 120 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 120 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "data" -output_layer_names: "__gru_0__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__gru_0__" - input_layer_names: "data" - output_layer_names: "__gru_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr deleted file mode 100644 index e8cc61b8c5410..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_hsigmoid.protostr +++ /dev/null @@ -1,62 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "label" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "__hsigmoid_0__" - type: "hsigmoid" - size: 1 - active_type: "" - inputs { - input_layer_name: "data" - input_parameter_name: "___hsigmoid_0__.w0" - } - inputs { - input_layer_name: "label" - } - bias_parameter_name: "___hsigmoid_0__.wbias" - num_classes: 10 -} -parameters { - name: "___hsigmoid_0__.w0" - size: 900 - initial_mean: 0.0 - initial_std: 0.333333343267 - dims: 9 - dims: 100 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___hsigmoid_0__.wbias" - size: 9 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 9 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "data" -input_layer_names: "label" -output_layer_names: "__hsigmoid_0__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "label" - layer_names: "__hsigmoid_0__" - input_layer_names: "data" - input_layer_names: "label" - output_layer_names: "__hsigmoid_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr deleted file mode 100644 index 8341cd2684746..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_lstmemory_layer.protostr +++ /dev/null @@ -1,53 +0,0 @@ -type: "nn" -layers { - name: "data" - type: "data" - size: 128 - active_type: "" -} -layers { - name: "__lstmemory_0__" - type: "lstmemory" - size: 32 - active_type: "tanh" - inputs { - input_layer_name: "data" - input_parameter_name: "___lstmemory_0__.w0" - } - bias_parameter_name: "___lstmemory_0__.wbias" - reversed: true - active_gate_type: "tanh" - active_state_type: "tanh" -} -parameters { - name: "___lstmemory_0__.w0" - size: 4096 - initial_mean: 0.0 - initial_std: 
0.176776692271 - dims: 32 - dims: 32 - dims: 4 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___lstmemory_0__.wbias" - size: 224 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 224 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "data" -output_layer_names: "__lstmemory_0__" -sub_models { - name: "root" - layer_names: "data" - layer_names: "__lstmemory_0__" - input_layer_names: "data" - output_layer_names: "__lstmemory_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_maxout.protostr deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr deleted file mode 100644 index 44400e2c3a23d..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_ntm_layers.protostr +++ /dev/null @@ -1,225 +0,0 @@ -type: "nn" -layers { - name: "w" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "a" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "b" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "c" - type: "data" - size: 200 - active_type: "" -} -layers { - name: "d" - type: "data" - size: 31 - active_type: "" -} -layers { - name: "__interpolation_layer_0__" - type: "interpolation" - size: 100 - active_type: "" - inputs { - input_layer_name: "w" - } - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "b" - } -} -layers { - name: "__power_layer_0__" - type: "power" - size: 100 - active_type: "" - inputs { - input_layer_name: "w" - } - inputs { - input_layer_name: "a" - } -} -layers { - name: "__scaling_layer_0__" - type: "scaling" - size: 100 - active_type: "" - inputs { - input_layer_name: "w" - } - inputs { - input_layer_name: "a" - } -} -layers { - name: "__cos_sim_0__" - type: "cos" - size: 1 - active_type: "" - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "b" - } - cos_scale: 5.0 -} -layers { - name: "__cos_sim_1__" - type: "cos_vm" - size: 2 - active_type: "" - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "c" - } - cos_scale: 5.0 -} -layers { - name: "__sum_to_one_norm_layer_0__" - type: "sum_to_one_norm" - size: 100 - active_type: "" - inputs { - input_layer_name: "a" - } -} -layers { - name: "__conv_shift_layer_0__" - type: "conv_shift" - size: 100 - active_type: "" - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "d" - } -} -layers { - name: "__tensor_layer_0__" - type: "tensor" - size: 1000 - active_type: "" - inputs { - input_layer_name: "a" - input_parameter_name: "___tensor_layer_0__.w0" - } - inputs { - input_layer_name: "b" - } - bias_parameter_name: "___tensor_layer_0__.wbias" -} -layers { - name: "__slope_intercept_layer_0__" - type: "slope_intercept" - size: 100 - active_type: "" - inputs { - input_layer_name: "a" - } - slope: 0.699999988079 - intercept: 0.899999976158 -} -layers { - name: "__linear_comb_layer_0__" - type: "convex_comb" - size: 2 - active_type: "" - inputs { - input_layer_name: "b" - } - inputs { - input_layer_name: "c" - } -} -parameters { - name: "___tensor_layer_0__.w0" - size: 10000000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 100 - dims: 1000 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___tensor_layer_0__.wbias" - 
size: 1000 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 1000 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "w" -input_layer_names: "a" -input_layer_names: "b" -input_layer_names: "c" -input_layer_names: "d" -output_layer_names: "__interpolation_layer_0__" -output_layer_names: "__power_layer_0__" -output_layer_names: "__scaling_layer_0__" -output_layer_names: "__cos_sim_0__" -output_layer_names: "__cos_sim_1__" -output_layer_names: "__sum_to_one_norm_layer_0__" -output_layer_names: "__conv_shift_layer_0__" -output_layer_names: "__tensor_layer_0__" -output_layer_names: "__slope_intercept_layer_0__" -output_layer_names: "__linear_comb_layer_0__" -sub_models { - name: "root" - layer_names: "w" - layer_names: "a" - layer_names: "b" - layer_names: "c" - layer_names: "d" - layer_names: "__interpolation_layer_0__" - layer_names: "__power_layer_0__" - layer_names: "__scaling_layer_0__" - layer_names: "__cos_sim_0__" - layer_names: "__cos_sim_1__" - layer_names: "__sum_to_one_norm_layer_0__" - layer_names: "__conv_shift_layer_0__" - layer_names: "__tensor_layer_0__" - layer_names: "__slope_intercept_layer_0__" - layer_names: "__linear_comb_layer_0__" - input_layer_names: "w" - input_layer_names: "a" - input_layer_names: "b" - input_layer_names: "c" - input_layer_names: "d" - output_layer_names: "__interpolation_layer_0__" - output_layer_names: "__power_layer_0__" - output_layer_names: "__scaling_layer_0__" - output_layer_names: "__cos_sim_0__" - output_layer_names: "__cos_sim_1__" - output_layer_names: "__sum_to_one_norm_layer_0__" - output_layer_names: "__conv_shift_layer_0__" - output_layer_names: "__tensor_layer_0__" - output_layer_names: "__slope_intercept_layer_0__" - output_layer_names: "__linear_comb_layer_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr deleted file mode 100644 index c402aff174ab7..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_print_layer.protostr +++ /dev/null @@ -1,26 +0,0 @@ -type: "nn" -layers { - name: "input" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__print_0__" - type: "print" - active_type: "" - inputs { - input_layer_name: "input" - } -} -input_layer_names: "input" -output_layer_names: "input" -sub_models { - name: "root" - layer_names: "input" - layer_names: "__print_0__" - input_layer_names: "input" - output_layer_names: "input" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr deleted file mode 100644 index dfb5ce20a31a0..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_rnn_group.protostr +++ /dev/null @@ -1,650 +0,0 @@ -type: "recurrent_nn" -layers { - name: "seq_input" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "sub_seq_input" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "label" - type: "data" - size: 1 - active_type: "" -} -layers { - name: "__mixed_0__" - type: "mixed" - size: 400 - active_type: "" - inputs { - input_layer_name: "seq_input" - input_parameter_name: "___mixed_0__.w0" - proj_conf { - type: "fc" - name: "___mixed_0__.w0" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__mixed_1__" - type: "mixed" - size: 300 - active_type: "" - inputs { - input_layer_name: 
"seq_input" - input_parameter_name: "___mixed_1__.w0" - proj_conf { - type: "fc" - name: "___mixed_1__.w0" - input_size: 100 - output_size: 300 - } - } -} -layers { - name: "__recurrent_group_0__" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "seq_input@__recurrent_group_0__" - type: "scatter_agent" - size: 100 - active_type: "" -} -layers { - name: "rnn_forward+delay1@__recurrent_group_0__" - type: "agent" - size: 200 - active_type: "" -} -layers { - name: "rnn_forward@__recurrent_group_0__" - type: "fc" - size: 200 - active_type: "tanh" - inputs { - input_layer_name: "seq_input@__recurrent_group_0__" - input_parameter_name: "_rnn_forward@__recurrent_group_0__.w0" - } - inputs { - input_layer_name: "rnn_forward+delay1@__recurrent_group_0__" - input_parameter_name: "_rnn_forward@__recurrent_group_0__.w1" - } - bias_parameter_name: "_rnn_forward@__recurrent_group_0__.wbias" -} -layers { - name: "rnn_forward" - type: "gather_agent" - size: 200 - active_type: "" -} -layers { - name: "__last_seq_0__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "rnn_forward" - } - trans_type: "non-seq" -} -layers { - name: "__recurrent_group_1__" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "seq_input@__recurrent_group_1__" - type: "scatter_agent" - size: 100 - active_type: "" -} -layers { - name: "rnn_back+delay1@__recurrent_group_1__" - type: "agent" - size: 200 - active_type: "" -} -layers { - name: "rnn_back@__recurrent_group_1__" - type: "fc" - size: 200 - active_type: "tanh" - inputs { - input_layer_name: "seq_input@__recurrent_group_1__" - input_parameter_name: "_rnn_back@__recurrent_group_1__.w0" - } - inputs { - input_layer_name: "rnn_back+delay1@__recurrent_group_1__" - input_parameter_name: "_rnn_back@__recurrent_group_1__.w1" - } - bias_parameter_name: "_rnn_back@__recurrent_group_1__.wbias" -} -layers { - name: "rnn_back" - type: "gather_agent" - size: 200 - active_type: "" -} -layers { - name: "__first_seq_0__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "rnn_back" - } - select_first: true - trans_type: "non-seq" -} -layers { - name: "__recurrent_group_2__" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "sub_seq_input@__recurrent_group_2__" - type: "sequence_scatter_agent" - size: 100 - active_type: "" -} -layers { - name: "rnn_subseq_forward+delay1@__recurrent_group_2__" - type: "agent" - size: 200 - active_type: "" -} -layers { - name: "rnn_subseq_forward@__recurrent_group_2__" - type: "fc" - size: 200 - active_type: "tanh" - inputs { - input_layer_name: "sub_seq_input@__recurrent_group_2__" - input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w0" - } - inputs { - input_layer_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" - input_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.w1" - } - bias_parameter_name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" -} -layers { - name: "rnn_subseq_forward" - type: "sequence_gather_agent" - size: 200 - active_type: "" -} -layers { - name: "__last_seq_1__" - type: "seqlastins" - size: 200 - active_type: "linear" - inputs { - input_layer_name: "rnn_subseq_forward" - } - trans_type: "non-seq" -} -layers { - name: "__lstm_group_0___recurrent_group" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "__mixed_0__@__lstm_group_0___recurrent_group" - type: "scatter_agent" - size: 400 - active_type: "" -} -layers { - name: 
"__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - type: "mixed" - size: 400 - active_type: "" - inputs { - input_layer_name: "__mixed_0__@__lstm_group_0___recurrent_group" - proj_conf { - type: "identity" - name: "___lstm_group_0___input_recurrent.w0" - input_size: 400 - output_size: 400 - } - } - inputs { - input_layer_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - input_parameter_name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" - proj_conf { - type: "fc" - name: "___lstm_group_0___input_recurrent.w1" - input_size: 100 - output_size: 400 - } - } -} -layers { - name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - type: "lstm_step" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - } - inputs { - input_layer_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - } - bias_parameter_name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" - active_gate_type: "sigmoid" - active_state_type: "sigmoid" -} -layers { - name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - type: "get_output" - size: 100 - active_type: "" - inputs { - input_layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - input_layer_argument: "state" - } -} -layers { - name: "__lstm_group_0__" - type: "gather_agent" - size: 100 - active_type: "" -} -layers { - name: "__last_seq_2__" - type: "seqlastins" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "__lstm_group_0__" - } - trans_type: "non-seq" -} -layers { - name: "__gru_group_0___recurrent_group" - type: "recurrent_layer_group" - active_type: "" -} -layers { - name: "__mixed_1__@__gru_group_0___recurrent_group" - type: "scatter_agent" - size: 300 - active_type: "" -} -layers { - name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" - type: "agent" - size: 100 - active_type: "" -} -layers { - name: "__gru_group_0__@__gru_group_0___recurrent_group" - type: "gru_step" - size: 100 - active_type: "tanh" - inputs { - input_layer_name: "__mixed_1__@__gru_group_0___recurrent_group" - input_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" - } - inputs { - input_layer_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" - } - bias_parameter_name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" - active_gate_type: "sigmoid" -} -layers { - name: "__gru_group_0__" - type: "gather_agent" - size: 100 - active_type: "" -} -layers { - name: "__last_seq_3__" - type: "seqlastins" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "__gru_group_0__" - } - trans_type: "non-seq" -} -parameters { - name: "___mixed_0__.w0" - size: 40000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 400 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___mixed_1__.w0" - size: 30000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 300 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_forward@__recurrent_group_0__.w0" - size: 20000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: 
"_rnn_forward@__recurrent_group_0__.w1" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_forward@__recurrent_group_0__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_rnn_back@__recurrent_group_1__.w0" - size: 20000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_back@__recurrent_group_1__.w1" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_back@__recurrent_group_1__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "_rnn_subseq_forward@__recurrent_group_2__.w0" - size: 20000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_subseq_forward@__recurrent_group_2__.w1" - size: 40000 - initial_mean: 0.0 - initial_std: 0.0707106813788 - dims: 200 - dims: 200 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "_rnn_subseq_forward@__recurrent_group_2__.wbias" - size: 200 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 200 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group.w1" - size: 40000 - initial_mean: 0.0 - initial_std: 0.10000000149 - dims: 100 - dims: 400 - initial_strategy: 0 - initial_smart: true -} -parameters { - name: "___lstm_group_0__@__lstm_group_0___recurrent_group.wbias" - size: 300 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 300 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___gru_group_0__@__gru_group_0___recurrent_group.w0" - size: 30000 - initial_mean: 0.0 - initial_std: 0.00999999977648 - dims: 100 - dims: 300 - initial_strategy: 0 - initial_smart: false -} -parameters { - name: "___gru_group_0__@__gru_group_0___recurrent_group.wbias" - size: 300 - initial_mean: 0.0 - initial_std: 0.0 - dims: 1 - dims: 300 - initial_strategy: 0 - initial_smart: false -} -input_layer_names: "seq_input" -input_layer_names: "sub_seq_input" -output_layer_names: "__last_seq_0__" -output_layer_names: "__first_seq_0__" -output_layer_names: "__last_seq_1__" -output_layer_names: "__last_seq_2__" -output_layer_names: "__last_seq_3__" -sub_models { - name: "root" - layer_names: "seq_input" - layer_names: "sub_seq_input" - layer_names: "label" - layer_names: "__mixed_0__" - layer_names: "__mixed_1__" - layer_names: "__recurrent_group_0__" - layer_names: "rnn_forward" - layer_names: "__last_seq_0__" - layer_names: "__recurrent_group_1__" - layer_names: "rnn_back" - layer_names: "__first_seq_0__" - layer_names: "__recurrent_group_2__" - layer_names: "rnn_subseq_forward" - layer_names: "__last_seq_1__" - layer_names: "__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__" - layer_names: "__last_seq_2__" - layer_names: "__gru_group_0___recurrent_group" - layer_names: "__gru_group_0__" - layer_names: "__last_seq_3__" - input_layer_names: "seq_input" - input_layer_names: "sub_seq_input" - output_layer_names: "__last_seq_0__" - output_layer_names: "__first_seq_0__" - output_layer_names: "__last_seq_1__" - output_layer_names: 
"__last_seq_2__" - output_layer_names: "__last_seq_3__" - is_recurrent_layer_group: false -} -sub_models { - name: "__recurrent_group_0__" - layer_names: "seq_input@__recurrent_group_0__" - layer_names: "rnn_forward+delay1@__recurrent_group_0__" - layer_names: "rnn_forward@__recurrent_group_0__" - is_recurrent_layer_group: true - reversed: false - memories { - layer_name: "rnn_forward@__recurrent_group_0__" - link_name: "rnn_forward+delay1@__recurrent_group_0__" - is_sequence: false - } - in_links { - layer_name: "seq_input" - link_name: "seq_input@__recurrent_group_0__" - has_subseq: false - } - out_links { - layer_name: "rnn_forward@__recurrent_group_0__" - link_name: "rnn_forward" - has_subseq: false - } - target_inlinkid: -1 -} -sub_models { - name: "__recurrent_group_1__" - layer_names: "seq_input@__recurrent_group_1__" - layer_names: "rnn_back+delay1@__recurrent_group_1__" - layer_names: "rnn_back@__recurrent_group_1__" - is_recurrent_layer_group: true - reversed: true - memories { - layer_name: "rnn_back@__recurrent_group_1__" - link_name: "rnn_back+delay1@__recurrent_group_1__" - is_sequence: false - } - in_links { - layer_name: "seq_input" - link_name: "seq_input@__recurrent_group_1__" - has_subseq: false - } - out_links { - layer_name: "rnn_back@__recurrent_group_1__" - link_name: "rnn_back" - has_subseq: false - } - target_inlinkid: -1 -} -sub_models { - name: "__recurrent_group_2__" - layer_names: "sub_seq_input@__recurrent_group_2__" - layer_names: "rnn_subseq_forward+delay1@__recurrent_group_2__" - layer_names: "rnn_subseq_forward@__recurrent_group_2__" - is_recurrent_layer_group: true - reversed: false - memories { - layer_name: "rnn_subseq_forward@__recurrent_group_2__" - link_name: "rnn_subseq_forward+delay1@__recurrent_group_2__" - is_sequence: false - } - in_links { - layer_name: "sub_seq_input" - link_name: "sub_seq_input@__recurrent_group_2__" - has_subseq: true - } - out_links { - layer_name: "rnn_subseq_forward@__recurrent_group_2__" - link_name: "rnn_subseq_forward" - has_subseq: true - } - target_inlinkid: -1 -} -sub_models { - name: "__lstm_group_0___recurrent_group" - layer_names: "__mixed_0__@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___input_recurrent@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0__@__lstm_group_0___recurrent_group" - layer_names: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - is_recurrent_layer_group: true - reversed: false - memories { - layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0__+delay1@__lstm_group_0___recurrent_group" - is_sequence: false - } - memories { - layer_name: "__lstm_group_0___state@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0___state+delay1@__lstm_group_0___recurrent_group" - is_sequence: false - } - in_links { - layer_name: "__mixed_0__" - link_name: "__mixed_0__@__lstm_group_0___recurrent_group" - has_subseq: false - } - out_links { - layer_name: "__lstm_group_0__@__lstm_group_0___recurrent_group" - link_name: "__lstm_group_0__" - has_subseq: false - } - target_inlinkid: -1 -} -sub_models { - name: "__gru_group_0___recurrent_group" - layer_names: "__mixed_1__@__gru_group_0___recurrent_group" - layer_names: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" - layer_names: "__gru_group_0__@__gru_group_0___recurrent_group" - is_recurrent_layer_group: 
true - reversed: false - memories { - layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" - link_name: "__gru_group_0__+delay1@__gru_group_0___recurrent_group" - is_sequence: false - } - in_links { - layer_name: "__mixed_1__" - link_name: "__mixed_1__@__gru_group_0___recurrent_group" - has_subseq: false - } - out_links { - layer_name: "__gru_group_0__@__gru_group_0___recurrent_group" - link_name: "__gru_group_0__" - has_subseq: false - } - target_inlinkid: -1 -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr b/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr deleted file mode 100644 index 1999c006d237e..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/test_sequence_pooling.protostr +++ /dev/null @@ -1,111 +0,0 @@ -type: "nn" -layers { - name: "dat_in" - type: "data" - size: 100 - active_type: "" -} -layers { - name: "__seq_pooling_0__" - type: "max" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - trans_type: "seq" -} -layers { - name: "__seq_pooling_1__" - type: "max" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - trans_type: "non-seq" -} -layers { - name: "__seq_pooling_2__" - type: "average" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - average_strategy: "average" - trans_type: "seq" -} -layers { - name: "__seq_pooling_3__" - type: "average" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - average_strategy: "average" - trans_type: "non-seq" -} -layers { - name: "__seq_pooling_4__" - type: "average" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - average_strategy: "sum" - trans_type: "seq" -} -layers { - name: "__seq_pooling_5__" - type: "average" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - average_strategy: "sum" - trans_type: "non-seq" -} -layers { - name: "__seq_pooling_6__" - type: "max" - size: 100 - active_type: "linear" - inputs { - input_layer_name: "dat_in" - } - output_max_index: true - trans_type: "non-seq" -} -input_layer_names: "dat_in" -output_layer_names: "__seq_pooling_0__" -output_layer_names: "__seq_pooling_1__" -output_layer_names: "__seq_pooling_2__" -output_layer_names: "__seq_pooling_3__" -output_layer_names: "__seq_pooling_4__" -output_layer_names: "__seq_pooling_5__" -output_layer_names: "__seq_pooling_6__" -sub_models { - name: "root" - layer_names: "dat_in" - layer_names: "__seq_pooling_0__" - layer_names: "__seq_pooling_1__" - layer_names: "__seq_pooling_2__" - layer_names: "__seq_pooling_3__" - layer_names: "__seq_pooling_4__" - layer_names: "__seq_pooling_5__" - layer_names: "__seq_pooling_6__" - input_layer_names: "dat_in" - output_layer_names: "__seq_pooling_0__" - output_layer_names: "__seq_pooling_1__" - output_layer_names: "__seq_pooling_2__" - output_layer_names: "__seq_pooling_3__" - output_layer_names: "__seq_pooling_4__" - output_layer_names: "__seq_pooling_5__" - output_layer_names: "__seq_pooling_6__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr deleted file mode 100644 index 89ed28406e553..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/unused_layers.protostr +++ /dev/null @@ -1,27 +0,0 @@ -type: "nn" -layers { - name: "probs" - type: "data" - size: 100 
- active_type: "" -} -layers { - name: "__sampling_id_layer_0__" - type: "sampling_id" - size: 100 - active_type: "" - inputs { - input_layer_name: "probs" - } -} -input_layer_names: "probs" -output_layer_names: "__sampling_id_layer_0__" -sub_models { - name: "root" - layer_names: "probs" - layer_names: "__sampling_id_layer_0__" - input_layer_names: "probs" - output_layer_names: "__sampling_id_layer_0__" - is_recurrent_layer_group: false -} - diff --git a/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr deleted file mode 100644 index d0ad388165007..0000000000000 --- a/python/paddle/trainer_config_helpers/tests/configs/util_layers.protostr +++ /dev/null @@ -1,81 +0,0 @@ -type: "nn" -layers { - name: "a" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "b" - type: "data" - size: 10 - active_type: "" -} -layers { - name: "__addto_0__" - type: "addto" - size: 10 - active_type: "" - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "b" - } -} -layers { - name: "__concat_0__" - type: "concat" - size: 20 - active_type: "" - inputs { - input_layer_name: "a" - } - inputs { - input_layer_name: "b" - } -} -layers { - name: "__concat_1__" - type: "concat2" - size: 20 - active_type: "" - inputs { - input_layer_name: "a" - proj_conf { - type: "identity" - name: "___concat_1__.w0" - input_size: 10 - output_size: 10 - } - } - inputs { - input_layer_name: "b" - proj_conf { - type: "identity" - name: "___concat_1__.w1" - input_size: 10 - output_size: 10 - } - } -} -input_layer_names: "a" -input_layer_names: "b" -output_layer_names: "__addto_0__" -output_layer_names: "__concat_0__" -output_layer_names: "__concat_1__" -sub_models { - name: "root" - layer_names: "a" - layer_names: "b" - layer_names: "__addto_0__" - layer_names: "__concat_0__" - layer_names: "__concat_1__" - input_layer_names: "a" - input_layer_names: "b" - output_layer_names: "__addto_0__" - output_layer_names: "__concat_0__" - output_layer_names: "__concat_1__" - is_recurrent_layer_group: false -} - From c8091ad80f0676c75a3859cda197e1bd2a7e262e Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 19:46:00 +0800 Subject: [PATCH 120/180] Follow comments --- .../protostr/test_bilinear_interp.protostr | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr new file mode 100644 index 0000000000000..d4cbfc2389ac5 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr @@ -0,0 +1,123 @@ +type: "nn" +layers { + name: "data" + type: "data" + size: 2304 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconv" + size: 36864 + active_type: "" + inputs { + input_layer_name: "data" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 3 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 1 + output_x: 48 + img_size: 48 + caffe_mode: true + filter_size_y: 3 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 16 + shared_biases: true +} +layers { + name: "__bilinear_interp_layer_0__" + type: "bilinear_interp" + size: 65536 + active_type: "" + inputs { + input_layer_name: 
"__conv_0__" + bilinear_interp_conf { + out_size_x: 64 + out_size_y: 64 + num_channels: 16 + } + } +} +layers { + name: "__pool_0__" + type: "pool" + size: 16384 + active_type: "" + inputs { + input_layer_name: "__bilinear_interp_layer_0__" + pool_conf { + pool_type: "max-projection" + channels: 4 + size_x: 2 + stride: 2 + output_x: 64 + img_size: 128 + padding: 0 + size_y: 2 + stride_y: 2 + output_y: 64 + img_size_y: 128 + padding_y: 0 + } + } +} +layers { + name: "__fc_layer_0__" + type: "fc" + size: 384 + active_type: "tanh" + inputs { + input_layer_name: "__pool_0__" + input_parameter_name: "___fc_layer_0__.w0" + } +} +parameters { + name: "___conv_0__.w0" + size: 144 + initial_mean: 0.0 + initial_std: 0.471404522657 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 16 + initial_mean: 0.0 + initial_std: 0.0 + dims: 16 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___fc_layer_0__.w0" + size: 6291456 + initial_mean: 0.0 + initial_std: 0.0078125 + dims: 16384 + dims: 384 + initial_strategy: 0 + initial_smart: true +} +input_layer_names: "data" +output_layer_names: "__fc_layer_0__" +sub_models { + name: "root" + layer_names: "data" + layer_names: "__conv_0__" + layer_names: "__bilinear_interp_layer_0__" + layer_names: "__pool_0__" + layer_names: "__fc_layer_0__" + input_layer_names: "data" + output_layer_names: "__fc_layer_0__" + is_recurrent_layer_group: false +} + From dfbde28ad30243a4d7d6152c1c3abb82bb298e2e Mon Sep 17 00:00:00 2001 From: qijun Date: Wed, 9 Nov 2016 11:48:09 +0000 Subject: [PATCH 121/180] add some code comments for SppLayer --- paddle/gserver/layers/SpatialPyramidPoolLayer.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/gserver/layers/SpatialPyramidPoolLayer.h index 8416a717d654e..e15b6d2f85c6f 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.h +++ b/paddle/gserver/layers/SpatialPyramidPoolLayer.h @@ -24,6 +24,8 @@ namespace paddle { * @brief A layer for spatial pyramid pooling on the input image by taking * the max, average, etc. within regions, so that the result vector of * different sized images are of the same size. + * + * The config file api is spp_layer. 
*/ class SpatialPyramidPoolLayer : public Layer { From 1c9f6f7f9902ce05e71fd62edea9b10f895009e5 Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 20:36:45 +0800 Subject: [PATCH 122/180] Update --- paddle/gserver/tests/test_LayerGrad.cpp | 47 +++++++++++++------------ 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index d8c45040d86bc..ad09d7d00e4f5 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -32,28 +32,6 @@ P_DECLARE_double(checkgrad_eps); P_DECLARE_bool(thread_local_rand_use_global_seed); P_DECLARE_bool(prev_batch_state); -TEST(Layer, BilinearInterpLayer) { - TestConfig config; - config.layerConfig.set_type("bilinear_interp"); - config.biasSize = 0; - - config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0}); - LayerInputConfig* input = config.layerConfig.add_inputs(); - BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf(); - - bilinear->set_img_size_x(32); - bilinear->set_img_size_y(32); - bilinear->set_num_channels(4); - - for (auto useGpu : {false, true}) { - for (auto out_size : {32, 64, 128}) { - bilinear->set_out_size_x(out_size); - bilinear->set_out_size_y(out_size); - testLayerGrad(config, "bilinear_interp", 10, false, useGpu); - } - } -} - TEST(Operator, dot_mul) { TestConfig config; config.layerConfig.set_size(10); @@ -197,6 +175,31 @@ TEST(Projection, conv) { } #endif +TEST(Layer, BilinearInterpLayer) { + TestConfig config; + config.layerConfig.set_type("bilinear_interp"); + config.biasSize = 0; + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0}); + + for (auto useGpu : {false, true}) { + for (auto out_size : {32, 64}) { + LOG(INFO) << " out_size_x=" << out_size + << " out_size_y=" << out_size; + LayerInputConfig* input + = config.layerConfig.add_inputs(); + BilinearInterpConfig* bilinear + = input->mutable_bilinear_interp_conf(); + bilinear->set_img_size_x(32); + bilinear->set_img_size_y(32); + bilinear->set_num_channels(4); + bilinear->set_out_size_x(out_size); + bilinear->set_out_size_y(out_size); + testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + } + } +} + TEST(Layer, concat) { TestConfig config; config.biasSize = 0; From 65b8bb25833f94453f17d8539fe77203bc88b9bb Mon Sep 17 00:00:00 2001 From: liaogang Date: Wed, 9 Nov 2016 21:07:25 +0800 Subject: [PATCH 123/180] Fix a bug --- paddle/gserver/tests/test_LayerGrad.cpp | 32 ++++++++++++------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index ad09d7d00e4f5..c1f7876c1722f 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -179,25 +179,23 @@ TEST(Layer, BilinearInterpLayer) { TestConfig config; config.layerConfig.set_type("bilinear_interp"); config.biasSize = 0; - config.inputDefs.push_back({INPUT_DATA, "layer_0", 4096, 0}); - for (auto useGpu : {false, true}) { - for (auto out_size : {32, 64}) { - LOG(INFO) << " out_size_x=" << out_size - << " out_size_y=" << out_size; - LayerInputConfig* input - = config.layerConfig.add_inputs(); - BilinearInterpConfig* bilinear - = input->mutable_bilinear_interp_conf(); - bilinear->set_img_size_x(32); - bilinear->set_img_size_y(32); - bilinear->set_num_channels(4); - bilinear->set_out_size_x(out_size); - bilinear->set_out_size_y(out_size); - testLayerGrad(config, "bilinear_interp", 10, false, useGpu); - } - } + 
LayerInputConfig* input = config.layerConfig.add_inputs(); + BilinearInterpConfig* bilinear = input->mutable_bilinear_interp_conf(); + bilinear->set_img_size_x(32); + bilinear->set_img_size_y(32); + bilinear->set_num_channels(4); + + bilinear->set_out_size_x(32); + bilinear->set_out_size_y(32); + testLayerGrad(config, "bilinear_interp", 10, false, false); + testLayerGrad(config, "bilinear_interp", 10, false, true); + + bilinear->set_out_size_x(64); + bilinear->set_out_size_y(64); + testLayerGrad(config, "bilinear_interp", 10, false, false); + testLayerGrad(config, "bilinear_interp", 10, false, true); } TEST(Layer, concat) { From 5c88f072621b52cd6069b365e85a0ae892ed8375 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 20 Oct 2016 17:37:10 -0700 Subject: [PATCH 124/180] initial take on deconv layers --- paddle/gserver/layers/ConvTransBaseLayer.cpp | 77 ++++ paddle/gserver/layers/ConvTransBaseLayer.h | 112 ++++++ .../gserver/layers/ExpandConvTransLayer.cpp | 332 ++++++++++++++++++ paddle/gserver/layers/ExpandConvTransLayer.h | 106 ++++++ paddle/gserver/tests/test_LayerGrad.cpp | 43 +++ 5 files changed, 670 insertions(+) create mode 100644 paddle/gserver/layers/ConvTransBaseLayer.cpp create mode 100644 paddle/gserver/layers/ConvTransBaseLayer.h create mode 100644 paddle/gserver/layers/ExpandConvTransLayer.cpp create mode 100644 paddle/gserver/layers/ExpandConvTransLayer.h diff --git a/paddle/gserver/layers/ConvTransBaseLayer.cpp b/paddle/gserver/layers/ConvTransBaseLayer.cpp new file mode 100644 index 0000000000000..68fb48a38b518 --- /dev/null +++ b/paddle/gserver/layers/ConvTransBaseLayer.cpp @@ -0,0 +1,77 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + + +#include "paddle/utils/Logging.h" +#include "ConvTransBaseLayer.h" +namespace paddle { + +bool ConvTransBaseLayer::init(const LayerMap& layerMap, + const ParameterMap& parameterMap) { + /* Initialize the basic parent class */ + Layer::init(layerMap, parameterMap); + + /* Initialize the convolutional layer parameter */ + channel_ = config_.num_filters(); + sharedBiases_ = config_.shared_biases(); + for (auto& inputConfig : config_.inputs()) { + const ConvConfig& conf = inputConfig.conv_conf(); + padding_.push_back(conf.padding()); + stride_.push_back(conf.stride()); + filterSize_.push_back(conf.filter_size()); + paddingY_.push_back(conf.padding_y()); + strideY_.push_back(conf.stride_y()); + filterSizeY_.push_back(conf.filter_size_y()); + filterPixels_.push_back(filterSize_.back() * filterSizeY_.back()); + numFilters_.push_back(conf.channels()); + imgSize_.push_back(conf.img_size()); + imgPixels_.push_back(imgSize_.back() * imgSize_.back()); + groups_.push_back(conf.groups()); + filterChannels_.push_back(conf.filter_channels()); + outputX_.push_back(conf.output_x()); + outputs_.push_back(outputX_.back() * outputX_.back()); + } + + /* initialize the weightList */ + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + size_t height, width; + height = filterPixels_[i] * filterChannels_[i]; + width = numFilters_[i]; + + // create a new weight + CHECK_EQ(parameters_[i]->getSize(), width * height); + Weight* w = new Weight(height, width, parameters_[i]); + weights_.emplace_back(w); + } + + /* initialize the biases_ */ + if (biasParameter_.get() != NULL) { + if (sharedBiases_) { + CHECK_EQ((size_t)channel_, biasParameter_->getSize()); + biases_ = + std::unique_ptr<Weight>(new Weight(channel_, 1, biasParameter_)); + } else { + biases_ = + std::unique_ptr<Weight>(new Weight(getSize(), 1, biasParameter_)); + } + } + + // default caffe model + caffeMode_ = true; + + return true; +} + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvTransBaseLayer.h b/paddle/gserver/layers/ConvTransBaseLayer.h new file mode 100644 index 0000000000000..467a260569759 --- /dev/null +++ b/paddle/gserver/layers/ConvTransBaseLayer.h @@ -0,0 +1,112 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#pragma once + +#include "Layer.h" +namespace paddle { + +/** + * @brief A Base Convolution Layer, which convolves the input image + * with learned filters and (optionally) adds biases. + */ + +class ConvTransBaseLayer : public Layer { +protected: + typedef std::vector<int> IntV; + + /// The number of channels in the image (the output of the deconv layer). + int channel_; + /// The x dimension of the padding. + IntV padding_; + /// The y dimension of the padding. + IntV paddingY_; + /// The x dimension of the stride. + IntV stride_; + /// The y dimension of the stride. + IntV strideY_; + /// The x dimension of a filter kernel. + IntV filterSize_; + /// The y dimension of a filter kernel. + IntV filterSizeY_; + /// The number of filters (i.e.
the number of channels of the deconv layer input) + IntV numFilters_; + /// The spatial dimensions of input feature map. + IntV imgSize_; + /// The total pixel size of input feature map. + /// imgPixels_ = imgSizeX_ * imgSizeY_. + IntV imgPixels_; + /// filterPixels_ = filterSizeX_ * filterSizeY_. + IntV filterPixels_; + /// filterChannels_ = channels_/groups_. + IntV filterChannels_; + /// The spatial dimensions of output feature map. + IntV outputX_; + /// The spatial dimensions of output feature map. + IntV outputs_; + /// Group size, refer to grouped convolution in + /// Alex Krizhevsky's paper: when group=2, the first half of the + /// filters are only connected to the first half of the input channels, + /// and the second half only connected to the second half. + IntV groups_; + /// Whether the bias is shared for feature in each channel. + bool sharedBiases_; + + /// shape of weight: (numChannels * filterPixels_, numFilters) + WeightList weights_; + /// If shared_biases is false shape of bias: (numFilters_, 1) + /// If shared_biases is true shape of bias: + /// (numFilters_ * outputX * outputY, 1) + std::unique_ptr<Weight> biases_; + + /// True by default. The only difference is the calculation + /// of output size. + bool caffeMode_; + +public: + explicit ConvTransBaseLayer(const LayerConfig& config) : Layer(config) {} + + virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + Weight& getWeight(int idx) { return *weights_[idx]; } + + /** + * Calculate image size based on caffeMode_ from outputSize. + * - input(+padding): 0123456789 + * - imageSize(+padding) = 10; + * - filterSize = 3; + * - stride = 2; + * - caffeMode_ is true: + - output: (012), (234), (456), (678) + - outputSize = 4; + * - caffeMode_ is false: + * - output: (012), (234), (456), (678), (9) + * - outputSize = 5; + */ + + int imageSize(int outputSize, int filterSize, int padding, int stride) { + int imageSize; + if (!caffeMode_) { + imageSize = + (outputSize - 1) * stride + filterSize - 2 * padding - stride + 1; + } else { + imageSize = (outputSize - 1) * stride + filterSize - 2 * padding; + } + CHECK_GE(imageSize, 1); + return imageSize; + } +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp new file mode 100644 index 0000000000000..56cc042653b60 --- /dev/null +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -0,0 +1,332 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + + +#include "paddle/utils/Logging.h" +#include "paddle/utils/Stat.h" +#include "ExpandConvTransLayer.h" + +namespace paddle { + +REGISTER_LAYER(exconvt, ExpandConvTransLayer); + +bool ExpandConvTransLayer::init(const LayerMap &layerMap, + const ParameterMap ¶meterMap) { + /* Initialize the basic convolutional parent class */ + ConvTransBaseLayer::init(layerMap, parameterMap); + + /* Initialize the projection */ + for (auto &inputConfig : config_.inputs()) { + const ConvConfig &conf = inputConfig.conv_conf(); + subM_.push_back(conf.channels() / conf.groups()); + subN_.push_back(conf.output_x() * conf.output_x()); + subK_.push_back(channel_ * conf.filter_size() * conf.filter_size() / + conf.groups()); + /* Consistent caffe mode for multiple input */ + caffeMode_ = conf.caffe_mode(); + } + + return true; +} + +// Why this is necessary after calling init? +size_t ExpandConvTransLayer::getSize() { + CHECK_NE(inputLayers_.size(), 0UL); + imgSizeH_.clear(); + imgSizeW_.clear(); + outputH_.clear(); + outputW_.clear(); + subN_.clear(); + size_t layerSize = 0; + for (size_t i = 0; i < inputLayers_.size(); i++) { + outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); + outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); + if (outputH_[i] == 0) outputH_[i] = outputX_[i]; + if (outputW_[i] == 0) outputW_[i] = outputX_[i]; + imgSizeH_.push_back( + imageSize(outputH_[i], filterSize_[i], padding_[i], stride_[i])); + imgSizeW_.push_back( + imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i])); + subN_.push_back(outputH_[i] * outputW_[i]); + CHECK(layerSize == 0 || + imgSizeH_[i] * imgSizeW_[i] * (size_t)channel_ == layerSize); + layerSize = imgSizeH_[i] * imgSizeW_[i] * channel_; + } + getOutput().setFrameHeight(imgSizeH_[0]); + getOutput().setFrameWidth(imgSizeW_[0]); + return layerSize; +} + +void ExpandConvTransLayer::resetExpandInput(size_t height, size_t width) { + Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); +} + +/*void ExpandConvTransLayer::resetConvOutput(size_t batchSize, int inIdx) { + Matrix::resizeOrCreate(transOutValue_, batchSize * numFilters_, subN_[inIdx], + false, useGpu_); +}*/ + + +void ExpandConvTransLayer::addSharedBias() { + size_t mapW = getSize() / channel_; + size_t mapH = getOutputValue()->getElementCnt() / mapW; + MatrixPtr out = + Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); + + Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); + + out->transpose(transOutValue_, false); // false means no memory allocation + transOutValue_->reshape(transOutValue_->getElementCnt() / channel_, + channel_); + + MatrixPtr bias = + Matrix::create(biases_->getW()->getData(), 1, + biases_->getW()->getElementCnt(), false, useGpu_); + transOutValue_->addBias(*bias, 1.0f); + + transOutValue_->reshape(mapW, mapH); + transOutValue_->transpose(out, false); // false means no memory allocation + + out->clear(); + bias->clear(); +} + +void ExpandConvTransLayer::addUnsharedBias() { + MatrixPtr outValue = getOutputValue(); + MatrixPtr bias = + Matrix::create(biases_->getW()->getData(), 1, + biases_->getW()->getElementCnt(), false, useGpu_); + outValue->addBias(*bias, 1.0f); +} + + +void ExpandConvTransLayer::expandOneFrame(MatrixPtr image, size_t startIdx, + int inIdx) { + resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); + real *imgData = image->getData() + startIdx * image->getWidth(); + MatrixPtr imageTmp = Matrix::create( + imgData, 1, imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel_, 
false, + useGpu_); + expandInput_->convExpand(*imageTmp, imgSizeH_[inIdx], imgSizeW_[inIdx], + channel_, filterSize_[inIdx], + filterSize_[inIdx], stride_[inIdx], stride_[inIdx], + padding_[inIdx], padding_[inIdx], + outputH_[inIdx], outputW_[inIdx]); + imageTmp->clear(); +} + +void ExpandConvTransLayer::expandBackOnce(MatrixPtr imageGrad, int inIdx, + int startIdx) { + int subM = subM_[inIdx]; + int subN = subN_[inIdx]; + int subK = subK_[inIdx]; + + LayerPtr prevLayer = getPrev(inIdx); + if (NULL == prevLayer->getOutputGrad()) { + return; + } + + expandOneFrame(imageGrad, startIdx, inIdx); + + real *outGradData = + prevLayer -> getOutputGrad()->getData() + + startIdx * subN * numFilters_[inIdx]; + + real *wgtData = weights_[inIdx]->getW()->getData(); + real *expInData = expandInput_->getData(); + for (int g = 0; g < groups_[inIdx]; ++g) { + MatrixPtr A = + Matrix::create(wgtData, subK, subM, true, useGpu_); // mark transpose + MatrixPtr B = Matrix::create(expInData, subK, subN, false, useGpu_); + MatrixPtr C = Matrix::create(outGradData, subM, subN, false, useGpu_); + C->mul(A, B, 1, 1); + + A->clear(); + B->clear(); + C->clear(); + wgtData += subK * subM; + expInData += subK * subN; + outGradData += subM * subN; + } +} + +void ExpandConvTransLayer::forward(PassType passType) { + Layer::forward(passType); + + /* malloc memory for the output_ if necessary */ + /* note: one sample correspond to one colum, and the + * transOutValue correspond sample to one row */ + int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); + resetOutput(batchSize, getSize()); + + MatrixPtr output = nullptr; + for (size_t i = 0; i != inputLayers_.size(); ++i) { + LayerPtr prevLayer = getPrev(i); + output = prevLayer->getOutputValue(); + REGISTER_TIMER_INFO("shrinkFwd", getName().c_str()); + shrinkFwd(output, i); + } + + /* add the bias-vector */ + if (biases_.get() != NULL) { + if (sharedBiases_) { + addSharedBias(); + } else { + addUnsharedBias(); + } + } + + /* activation */ + forwardActivation(); +} + +void ExpandConvTransLayer::shrinkFwd(MatrixPtr output, int inpIdx) { + int subM = subM_[inpIdx]; + int subN = subN_[inpIdx]; + int subK = subK_[inpIdx]; + + size_t batchSize = output->getHeight(); + MatrixPtr image = getOutputValue(); + + /* reset the expand-grad memory */ + resetExpandInput(subK * groups_[inpIdx], subN); + + real *localData = output->getData(); + real *imageData = image->getData(); + for (size_t n = 0; n < batchSize; n++) { + real *wgtData = weights_[inpIdx]->getW()->getData(); + real *expandInData = expandInput_->getData(); + + for (int g = 0; g < groups_[inpIdx]; g++) { + // create temporary matrix + MatrixPtr C = Matrix::create(expandInData, subK, subN, false, useGpu_); + MatrixPtr B = Matrix::create(localData, subM, subN, false, useGpu_); + MatrixPtr A = Matrix::create(wgtData, subK, subM, false, useGpu_); + C->mul(A, B); // mul + + // clear the temporary matrix + A->clear(); + B->clear(); + C->clear(); + + expandInData += subK * subN; + localData += subM * subN; + wgtData += subK * subM; + } + + // shrink one frame outGrad + MatrixPtr oneTmp = Matrix::create( + expandInput_->getData(), subK * groups_[inpIdx], subN, false, useGpu_); + MatrixPtr vTmp = Matrix::create( + imageData, 1, + imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel_, false, + useGpu_); + vTmp->convShrink(*oneTmp, imgSizeH_[inpIdx], imgSizeW_[inpIdx], + channel_, filterSize_[inpIdx], + filterSize_[inpIdx], stride_[inpIdx], stride_[inpIdx], + padding_[inpIdx], padding_[inpIdx], + outputH_[inpIdx], outputW_[inpIdx], 
1.0f, 1.0f); + vTmp->clear(); + oneTmp->clear(); + + // move the data-pointer + imageData += imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel_; + } +} + +void ExpandConvTransLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { + size_t mapW = getSize() / channel_; + size_t mapH = v->getElementCnt() / mapW; + MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); + + Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); + + vTmp->transpose(transOutValue_, false); // false means no memory allocation + vTmp->reshape(transOutValue_->getElementCnt() / channel_, channel_); + biases->collectBias(*vTmp, 1.0f); +} + +void ExpandConvTransLayer::bpropBiases(MatrixPtr v) { + MatrixPtr biases = + Matrix::create(biases_->getWGrad()->getData(), 1, + biases_->getWGrad()->getElementCnt(), false, useGpu_); + if (sharedBiases_) { + bpropSharedBias(biases, v); + } else { + biases->collectBias(*v, 1.0f); + } + biases->clear(); +} + +void ExpandConvTransLayer::backward(const UpdateCallback &callback) { + backwardActivation(); + + MatrixPtr imageGrad = getOutputGrad(); + if (biases_ && biases_->getWGrad()) { + bpropBiases(imageGrad); + /* Increasing the number of gradient */ + biases_->getParameterPtr()->incUpdate(callback); + } + + for (size_t i = 0; i != inputLayers_.size(); ++i) { + /* First, calculate the input layers error */ + for (size_t off = 0; off < imageGrad->getHeight(); off++) { + expandBackOnce(imageGrad, i, off); + } + if (weights_[i]->getWGrad()) { + /* Then, calculate the W-gradient for the current layer */ + bpropWeights(imageGrad, i); + /* Increasing the number of gradient */ + weights_[i]->getParameterPtr()->incUpdate(callback); + } + } +} + +void ExpandConvTransLayer::bpropWeights(MatrixPtr v, int inpIdx) { + MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); + MatrixPtr outputV = getPrev(inpIdx)->getOutputValue(); + + int subM = subM_[inpIdx]; + int subN = subN_[inpIdx]; + int subK = subK_[inpIdx]; + size_t batchSize = outputV->getHeight(); + resetExpandInput(subK * groups_[inpIdx], subN); + + real *outputData = outputV -> getData(); + + for (size_t n = 0; n < batchSize; n++) { // frame by frame + // expand + expandOneFrame(v, n, inpIdx); + real *wGradData = weightGrad->getData(); + real *expandInData = expandInput_->getData(); + + // expand-mul one-group by one + for (int g = 0; g < groups_[inpIdx]; g++) { + MatrixPtr A = Matrix::create(expandInData, subK, subN, false, useGpu_); + MatrixPtr B = Matrix::create(outputData, subM, subN, true, useGpu_); + MatrixPtr C = Matrix::create(wGradData, subK, subM, false, useGpu_); + C->mul(A, B, 1, 1); + + A->clear(); + B->clear(); + C->clear(); + outputData += subM * subN; + wGradData += subK * subM; + expandInData += subK * subN; + } + } +} + + +} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h new file mode 100644 index 0000000000000..f19aa81f3d3c2 --- /dev/null +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -0,0 +1,106 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + + +#pragma once + +#include "ConvTransBaseLayer.h" +#include "paddle/math/Matrix.h" +#include + +namespace paddle { + +/** + * @brief A subclass of convolution layer. + * This layer expands input and use matrix multiplication to + * calculate convolution operation. + * + * The config file api is img_conv_layer. + */ +class ExpandConvTransLayer : public ConvTransBaseLayer { +protected: + /// For expand convolution. + /// subM_ = numFilters_ / groups_. + IntV subM_; + /// subN_ = outputH_ * outputW_. + IntV subN_; + /// subK_ = channels_ * filterPixels_ * groups_. + IntV subK_; + /// The spatial dimensions of height of input feature map. + IntV imgSizeH_; + /// The spatial dimensions of width of input feature map. + IntV imgSizeW_; + /// The spatial dimensions of height of output feature map. + IntV outputH_; + /// The spatial dimensions of width of output feature map. + IntV outputW_; + /// Expand one sample at a time. shape: + /// (numChannels * filterPixels_, outputSizeH * outputSizeW) + MatrixPtr expandInput_; + /// The transpose of output, which is an auxiliary matrix. + MatrixPtr transOutValue_; + +public: + explicit ExpandConvTransLayer(const LayerConfig& config) : + ConvTransBaseLayer(config) {} + + ~ExpandConvTransLayer() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + size_t getSize(); + + /** + * Create or resize expandInput_. + */ + void resetExpandInput(size_t height, size_t width); + + /** + * Create or resize transOutValue_. + */ + void resetConvOutput(size_t batchSize, int inIdx); + + /** + * Expand one input sample. + */ + void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx); + + /** + * Expand one output image and perform matrix multiplication. + */ + void expandBackOnce(MatrixPtr image, int inIdx, int startIdx); + + /** + * Perform matrix multiplication on one output and then shrink. + */ + void shrinkFwd(MatrixPtr output, int inpIdx); + + /** + * Add shared bias. + */ + void addSharedBias(); + + /** + * Add unshared bias. 
+ */ + void addUnsharedBias(); + void forward(PassType passType); + void bpropSharedBias(MatrixPtr biases, MatrixPtr v); + void bpropBiases(MatrixPtr v); + void backward(const UpdateCallback& callback); + void bpropWeights(MatrixPtr v, int inpIdx); + void bpropActs(MatrixPtr v, int inpIdx); +}; + +} // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 4e01fa91ed2ba..d634d198c3be7 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -312,6 +312,49 @@ TEST(Layer, convLayer) { #endif } + +void testConvTransLayer(const string& type, bool trans, bool useGpu) { + TestConfig config; + config.biasSize = 3; + config.layerConfig.set_type(type); + config.layerConfig.set_num_filters(3); + config.layerConfig.set_partial_sum(1); + config.layerConfig.set_shared_biases(true); + + config.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288}); + LayerInputConfig* input = config.layerConfig.add_inputs(); + ConvConfig* conv = input->mutable_conv_conf(); + conv->set_filter_size(2); + conv->set_filter_size_y(3); + conv->set_channels(16); + conv->set_padding(0); + conv->set_padding_y(1); + conv->set_stride(2); + conv->set_stride_y(2); + conv->set_groups(1); + conv->set_filter_channels(3 / conv->groups()); + conv->set_img_size(16); + conv->set_output_x( + (2 * conv->padding() + conv->img_size() - conv->filter_size()) / + ((float)conv->stride()) + + 1.5); + + config.layerConfig.set_size(conv->img_size() * conv->img_size() * + config.layerConfig.num_filters()); + + testLayerGrad(config, "convTrans", 100, trans, useGpu); +} + +TEST(Layer, convTransLayer) { + testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ false); +/* +#ifndef PADDLE_ONLY_CPU + testConvLayer("exconv", trans= false, useGpu= true); + testConvLayer("cudnn_conv", trans= false, useGpu= true); +#endif +*/ +} + TEST(Layer, blockExpandLayer) { TestConfig config; config.biasSize = 0; From 70e44732c2c1a2186d26a076c3b3be69b6a91bc4 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 25 Oct 2016 13:55:40 -0700 Subject: [PATCH 125/180] added convTrans test and python components --- .gitignore | 2 + paddle/gserver/tests/CMakeLists.txt | 8 + paddle/gserver/tests/test_ConvTrans.cpp | 139 ++++++++++++++++++ python/paddle/trainer/config_parser.py | 95 ++++++++++++ .../paddle/trainer_config_helpers/layers.py | 123 ++++++++++++++++ 5 files changed, 367 insertions(+) create mode 100644 paddle/gserver/tests/test_ConvTrans.cpp diff --git a/.gitignore b/.gitignore index 65ba217de37c8..ee8489c1d71bd 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,6 @@ build/ .vscode .idea .project +.cproject .pydevproject +Makefile diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/gserver/tests/CMakeLists.txt index 26ee2b3aae64a..0651d0b4733ea 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/gserver/tests/CMakeLists.txt @@ -26,6 +26,14 @@ add_unittest_without_exec(test_ActivationGrad TestUtil.cpp) add_test(NAME test_ActivationGrad COMMAND test_ActivationGrad) +################# test_ConvTrans ####################### +add_unittest_without_exec(test_ConvTrans + test_ConvTrans.cpp + LayerGradUtil.cpp + TestUtil.cpp) + +add_test(NAME test_ConvTrans + COMMAND test_ConvTrans) ################## test_Evaluator ####################### add_unittest(test_Evaluator diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp new file mode 100644 index 0000000000000..e7cbe2614faca --- /dev/null +++ 
b/paddle/gserver/tests/test_ConvTrans.cpp @@ -0,0 +1,139 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include "paddle/gserver/layers/DataLayer.h" +#include "ModelConfig.pb.h" +#include "paddle/trainer/Trainer.h" +#include "paddle/utils/GlobalConstants.h" +#include "paddle/gserver/layers/ExpandConvTransLayer.h" + +#include "TestUtil.h" +#include "LayerGradUtil.h" + +using namespace paddle; // NOLINT +using namespace std; // NOLINT + +P_DECLARE_bool(use_gpu); +P_DECLARE_int32(gpu_id); +P_DECLARE_double(checkgrad_eps); +P_DECLARE_bool(thread_local_rand_use_global_seed); +P_DECLARE_bool(prev_batch_state); + +TEST(Layer, convTransLayerFwd) { + TestConfig configt; + configt.biasSize = 3; + configt.layerConfig.set_type("exconvt"); + configt.layerConfig.set_num_filters(3); + configt.layerConfig.set_partial_sum(1); + configt.layerConfig.set_shared_biases(true); + + configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288}); + LayerInputConfig* input = configt.layerConfig.add_inputs(); + ConvConfig* conv = input->mutable_conv_conf(); + conv->set_filter_size(2); + conv->set_filter_size_y(3); + conv->set_channels(16); + conv->set_padding(0); + conv->set_padding_y(1); + conv->set_stride(2); + conv->set_stride_y(2); + conv->set_groups(1); + conv->set_filter_channels(3 / conv->groups()); + conv->set_img_size(16); + conv->set_output_x( + (2 * conv->padding() + conv->img_size() - conv->filter_size()) / + ((float)conv->stride()) + + 1.5); + + configt.layerConfig.set_size(conv->img_size() * conv->img_size() * + configt.layerConfig.num_filters()); + configt.layerConfig.set_name("convTrans"); + + // data layer initialize + std::vector dataLayers; + LayerMap layerMap; + vector datas; + initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans", + 100, false, useGpu); + // test layer initialize + std::vector parameters; + LayerPtr convtLayer; + initTestLayer(configt, &layerMap, ¶meters, &convtLayer); + convtLayer->getBiasParameter()->zeroMem(); + convtLayer->forward(PASS_GC); + + TestConfig config; + config.biasSize = 16; + config.layerConfig.set_type("exconv"); + config.layerConfig.set_num_filters(16); + config.layerConfig.set_partial_sum(1); + config.layerConfig.set_shared_biases(true); + + config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 288}); + input = config.layerConfig.add_inputs(); + conv = input->mutable_conv_conf(); + conv->set_filter_size(2); + conv->set_filter_size_y(3); + conv->set_channels(3); + conv->set_padding(0); + conv->set_padding_y(1); + conv->set_stride(2); + conv->set_stride_y(2); + conv->set_groups(1); + conv->set_filter_channels(conv->channels() / conv->groups()); + conv->set_img_size(16); + conv->set_output_x( + (2 * conv->padding() + conv->img_size() - conv->filter_size()) / + ((float)conv->stride()) + + 1.5); + config.layerConfig.set_size(conv->output_x() * conv->output_x() * + config.layerConfig.num_filters()); + config.layerConfig.set_name("conv"); + + // data layer initialize + std::vector 
dataLayers2; + LayerMap layerMap2; + vector datas2; + initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv", + 100, false, useGpu); + // test layer initialize + std::vector parameters2; + LayerPtr convLayer; + initTestLayer(config, &layerMap2, ¶meters2, &convLayer); + + convLayer->getBiasParameter()->zeroMem(); + convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom( + *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE))); + + convLayer->forward(PASS_GC); + convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue())); + + vector callbackFlags(parameters2.size(), 0); + auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; }; + convLayer->backward(callback); + + checkMatrixEqual(convtLayer->getOutputValue(), + dataLayers2[0]->getOutputGrad()); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + initMain(argc, argv); + FLAGS_thread_local_rand_use_global_seed = true; + srand(1); + return RUN_ALL_TESTS(); +} diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 73631602a92be..2d28b34999cb0 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1106,6 +1106,37 @@ def parse_conv(conv, input_layer_name, conv_conf): conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) + +def parse_convt(conv, input_layer_name, conv_conf): + conv_conf.filter_size = conv.filter_size + conv_conf.filter_size_y = conv.filter_size_y + conv_conf.channels = conv.channels + conv_conf.padding = conv.padding + conv_conf.padding_y = conv.padding_y + conv_conf.stride = conv.stride + conv_conf.stride_y = conv.stride_y + conv_conf.groups = conv.groups + conv_conf.filter_channels = conv.channels / conv.groups + conv_conf.caffe_mode = conv.caffe_mode + + outputSize = g_layer_map[input_layer_name].size / conv.channels + print('channels=%d size=%d'%(conv.channels, + g_layer_map[input_layer_name].size)) + conv_conf.output_x = int(outputSize ** 0.5) + config_assert((conv_conf.output_x ** 2) == outputSize, + ("Input layer %s: Incorrect input image size %d for input " + + "image pixels %d") + % (input_layer_name, conv_conf.img_size, img_pixels)) + if conv.caffe_mode: + conv_conf.img_size = \ + (conv_conf.output_x - 1) * conv.stride \ + + conv.filter_size - 2 * conv.padding + else: + conv_conf.img_size = \ + (conv_conf.output_x - 1) * conv.stride \ + + conv.filter_size - 2 * conv.padding + 1 + + def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.channels = block_expand.channels block_expand_conf.stride_x = block_expand.stride_x @@ -1612,6 +1643,70 @@ class ConvLayer(ConvLayerBase): class ConvLayer(ConvLayerBase): layer_type = 'cudnn_conv' + +@config_layer('convt') +class ConvTransLayerBase(LayerBase): + layer_type = 'convt' + def __init__( + self, + name, + inputs=[], + bias=True, + num_filters=None, + shared_biases=False, + **xargs): + super(ConvLayerBase, self).__init__( + name, self.layer_type, 0, inputs=inputs, **xargs) + + if num_filters is not None: + self.config.num_filters = num_filters + + use_gpu = int(g_command_config_args.get("use_gpu", 0)) + parallel_nn = int(g_command_config_args.get("parallel_nn", 0)) + + # Automatically select cudnn_type for GPU and exconv for CPU + # if set type=conv, but still reserve the way user specify + # exconv or cudnn_conv manually. 
+ if self.layer_type == "cudnn_convt": + config_assert(use_gpu, "cudnn_convt only support GPU") + + if (use_gpu == 1 and self.layer_type != "exconvt" and + (parallel_nn == 0 or self.config.device > -1)): + self.layer_type = "cudnn_convt" + else: + self.layer_type = "exconvt" + # need to specify layer in config + self.config.type = self.layer_type + + if shared_biases is not None: + self.config.shared_biases = shared_biases + + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + parse_convt( + self.inputs[input_index].conv, + input_layer.name, + self.config.inputs[input_index].conv_conf) + conv_conf = self.config.inputs[input_index].conv_conf + psize = self.calc_parameter_size(conv_conf) + print("output size for %s is %d " % (name, conv_conf.output_x)) + self.create_input_parameter(input_index, psize) + self.set_layer_size( + (conv_conf.img_size ** 2) * self.config.num_filters) + + psize = self.config.size + if shared_biases: + psize = self.config.num_filters + self.create_bias_parameter(bias, psize, [psize, 1]) + + def calc_parameter_size(self, conv_conf): + return conv_conf.channels() * conv_conf.filter_channels \ + * (conv_conf.filter_size * conv_conf.filter_size_y) + +@config_layer('exconvt') +class ConvTransLayer(ConvTransLayerBase): + layer_type = 'exconvt' + @config_layer('norm') class NormLayer(LayerBase): def __init__( diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 49f0ff3289db7..853df8b83709d 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -78,6 +78,7 @@ class LayerType(object): COSINE_SIM = 'cos' HSIGMOID = 'hsigmoid' CONV_LAYER = "conv" + CONVTRANS_LAYER = "convt" POOL_LAYER = "pool" BATCH_NORM_LAYER = 'batch_norm' NORM_LAYER = 'norm' @@ -1625,6 +1626,128 @@ def img_conv_layer(input, filter_size, num_filters, return LayerOutput(name, LayerType.CONV_LAYER, parents=[input], activation=act, num_filters=num_filters) +@wrap_name_default("convt") +@wrap_param_attr_default() +@wrap_bias_attr_default() +@wrap_act_default(act=ReluActivation()) +@layer_support(DROPOUT) +def img_convTrans_layer(input, filter_size, num_filters, + name=None, num_channels=None, + act=None, groups=1, stride=1, padding=0, bias_attr=None, + param_attr=None, shared_biases=True, layer_attr=None, + filter_size_y=None, stride_y=None, padding_y=None): + """ + Convolution Transpose (deconv) layer for image. Paddle only support square + input currently and thus input image's width equals height. + + The details of convolution transpose layer, + please refer to the following explanation and references therein + `_ . + + The num_channel means input image's channel number. It may be 1 or 3 when + input is raw pixels of image(mono or RGB), or it may be the previous layer's + num_filters * num_group. + + There are several group of filter in PaddlePaddle implementation. + Each group will process some channel of the inputs. For example, if an input + num_channel = 256, group = 4, num_filter=32, the PaddlePaddle will create + 32*4 = 128 filters to process inputs. The channels will be split into 4 + pieces. First 256/4 = 64 channels will process by first 32 filters. The + rest channels will be processed by rest group of filters. + + :param name: Layer name. + :type name: basestring + :param input: Layer Input. + :type input: LayerOutput + :param filter_size: The x dimension of a filter kernel. Or input a tuple for + two image dimension. 
+ :type filter_size: int|tuple|list + :param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle + currently supports rectangular filters, the filter's + shape will be (filter_size, filter_size_y). + :type filter_size_y: int|None + :param num_filters: Each filter group's number of filter + :param act: Activation type. Default is tanh + :type act: BaseActivation + :param groups: Group size of filters. + :type groups: int + :param stride: The x dimension of the stride. Or input a tuple for two image + dimension. + :type stride: int|tuple|list + :param stride_y: The y dimension of the stride. + :type stride_y: int + :param padding: The x dimension of the padding. Or input a tuple for two + image dimension + :type padding: int|tuple|list + :param padding_y: The y dimension of the padding. + :type padding_y: int + :param bias_attr: Convolution bias attribute. None means default bias. + False means no bias. + :type bias_attr: ParameterAttribute|False + :param num_channels: number of input channels. If None will be set + automatically from previous output. + :type num_channels: int + :param param_attr: Convolution param attribute. None means default attribute + :type param_attr: ParameterAttribute + :param shared_biases: Is biases will be shared between filters or not. + :type shared_biases: bool + :param layer_attr: Layer Extra Attribute. + :type layer_attr: ExtraLayerAttribute + :return: LayerOutput object. + :rtype: LayerOutput + """ + if num_channels is None: + assert input.num_filters is not None + num_channels = input.num_filters + + if filter_size_y is None: + if isinstance(filter_size, collections.Sequence): + assert len(filter_size) == 2 + filter_size, filter_size_y = filter_size + else: + filter_size_y = filter_size + + if stride_y is None: + if isinstance(stride, collections.Sequence): + assert len(stride) == 2 + stride, stride_y = stride + else: + stride_y = stride + + if padding_y is None: + if isinstance(padding, collections.Sequence): + assert len(padding) == 2 + padding, padding_y = padding + else: + padding_y = padding + + if param_attr.attr.get('initial_smart'): + # special initial for conv layers. 
+ init_w = (2.0 / (filter_size ** 2 * num_channels)) ** 0.5 + param_attr.attr["initial_mean"] = 0.0 + param_attr.attr["initial_std"] = init_w + param_attr.attr["initial_strategy"] = 0 + param_attr.attr["initial_smart"] = False + Layer( + name=name, + inputs=Input(input.name, conv=Conv( + filter_size=filter_size, padding=padding, stride=stride, + channels=num_channels, groups=groups, + filter_size_y=filter_size_y, padding_y=padding_y, + stride_y=stride_y), + **param_attr.attr), + active_type=act.name, + num_filters=num_filters, + bias=ParamAttr.to_bias(bias_attr), + shared_biases=shared_biases, + type=LayerType.CONVTRANS_LAYER, + **ExtraLayerAttribute.to_kwargs(layer_attr) + ) + return LayerOutput(name, LayerType.CONVTRANS_LAYER, parents=[input], + activation=act, num_filters=num_filters) + + @wrap_name_default("pool") @layer_support() From bda259bb18b484b88d0a144392c2ccb1b1530769 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Wed, 26 Oct 2016 16:36:54 -0700 Subject: [PATCH 126/180] added more test on convTrans layer and comments --- paddle/gserver/layers/ConvTransBaseLayer.cpp | 11 ++ paddle/gserver/layers/ConvTransBaseLayer.h | 5 + .../gserver/layers/ExpandConvTransLayer.cpp | 5 + paddle/gserver/layers/ExpandConvTransLayer.h | 2 +- paddle/gserver/tests/test_ConvTrans.cpp | 116 +++++++++++++++++- python/paddle/trainer/config_parser.py | 14 +-- .../paddle/trainer_config_helpers/layers.py | 2 +- 7 files changed, 144 insertions(+), 11 deletions(-) diff --git a/paddle/gserver/layers/ConvTransBaseLayer.cpp b/paddle/gserver/layers/ConvTransBaseLayer.cpp index 68fb48a38b518..1b58b7fed43d4 100644 --- a/paddle/gserver/layers/ConvTransBaseLayer.cpp +++ b/paddle/gserver/layers/ConvTransBaseLayer.cpp @@ -23,6 +23,17 @@ bool ConvTransBaseLayer::init(const LayerMap& layerMap, Layer::init(layerMap, parameterMap); /* Initialize the convolutional layer parameter */ + /* Everything is the same as ConvBaseLayer.cpp except that the meaning of + * num_filters and channel is switched. + * + * In the config, num_filters refer to the number of feature maps in the + * output of convTransLayer, and channel refer to the number of feature maps + * in the input of convTransLayer. + * + * However, within the convTrans class, the channel is related to the output + * and num_filters is related to the input, so that it is consistent with the + * settings in convLayer. + * */ channel_ = config_.num_filters(); sharedBiases_ = config_.shared_biases(); for (auto& inputConfig : config_.inputs()) { diff --git a/paddle/gserver/layers/ConvTransBaseLayer.h b/paddle/gserver/layers/ConvTransBaseLayer.h index 467a260569759..d7acc184cc9ac 100644 --- a/paddle/gserver/layers/ConvTransBaseLayer.h +++ b/paddle/gserver/layers/ConvTransBaseLayer.h @@ -96,6 +96,11 @@ class ConvTransBaseLayer : public Layer { * - outputSize = 5; */ + /* + * In order to be consistent with the convLayer, here the outputSize is + * actually the size of the input image of convTransLayer, and the image size + * is actually the size of the output image of convTransLayer + */ int imageSize(int outputSize, int filterSize, int padding, int stride) { int imageSize; if (!caffeMode_) { diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index 56cc042653b60..67c045821d173 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -17,6 +17,11 @@ limitations under the License. 
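As a usage illustration of the img_convTrans_layer API introduced above, a minimal trainer-config sketch might look as follows. This is not part of the patch; the layer names, image size, and filter settings are hypothetical, and only the call signatures follow the code above.

.. code-block:: python

    from paddle.trainer_config_helpers import *

    # hypothetical 16x16 mono input; size = channels * height * width
    img = data_layer(name='input_image', size=1 * 16 * 16)

    # ordinary convolution: 16x16 -> 8x8 with filter 4, stride 2, padding 1
    conv = img_conv_layer(input=img, filter_size=4, num_filters=8,
                          num_channels=1, stride=2, padding=1,
                          act=ReluActivation())

    # the transposed convolution with the same filter/stride/padding maps the
    # 8x8 feature maps back to a 16x16 image; groups, stride and padding keep
    # the img_conv_layer semantics with the input and output roles swapped
    deconv = img_convTrans_layer(input=conv, filter_size=4, num_filters=1,
                                 num_channels=8, stride=2, padding=1,
                                 act=ReluActivation())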
*/ #include "paddle/utils/Stat.h" #include "ExpandConvTransLayer.h" +/* The implementation of the convTransLayer is basically a swap of forward and + * backward of the original convLayer. + * The variable naming follows the convention of the convLayer. + * */ + namespace paddle { REGISTER_LAYER(exconvt, ExpandConvTransLayer); diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index f19aa81f3d3c2..a6591fe1aa386 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -26,7 +26,7 @@ namespace paddle { * This layer expands input and use matrix multiplication to * calculate convolution operation. * - * The config file api is img_conv_layer. + * The config file api is img_convTrans_layer. */ class ExpandConvTransLayer : public ConvTransBaseLayer { protected: diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index e7cbe2614faca..787113d242391 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -33,7 +33,9 @@ P_DECLARE_double(checkgrad_eps); P_DECLARE_bool(thread_local_rand_use_global_seed); P_DECLARE_bool(prev_batch_state); +// Test that the convTrans forward is the same as conv backward TEST(Layer, convTransLayerFwd) { + // Setting up conv-trans layer TestConfig configt; configt.biasSize = 3; configt.layerConfig.set_type("exconvt"); @@ -68,7 +70,7 @@ TEST(Layer, convTransLayerFwd) { LayerMap layerMap; vector datas; initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans", - 100, false, useGpu); + 100, false, false); // test layer initialize std::vector parameters; LayerPtr convtLayer; @@ -76,6 +78,7 @@ TEST(Layer, convTransLayerFwd) { convtLayer->getBiasParameter()->zeroMem(); convtLayer->forward(PASS_GC); + // Setting up conv-layer config TestConfig config; config.biasSize = 16; config.layerConfig.set_type("exconv"); @@ -109,16 +112,18 @@ TEST(Layer, convTransLayerFwd) { LayerMap layerMap2; vector datas2; initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv", - 100, false, useGpu); + 100, false, false); // test layer initialize std::vector parameters2; LayerPtr convLayer; initTestLayer(config, &layerMap2, ¶meters2, &convLayer); + // Sync convLayer and convtLayer parameter convLayer->getBiasParameter()->zeroMem(); convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom( *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE))); + // Set convLayer outputGrad as convTransLayer input value convLayer->forward(PASS_GC); convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue())); @@ -126,10 +131,117 @@ TEST(Layer, convTransLayerFwd) { auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; }; convLayer->backward(callback); + // Check that the convLayer backward is the same as convTransLayer forward checkMatrixEqual(convtLayer->getOutputValue(), dataLayers2[0]->getOutputGrad()); } + +// Do one forward pass of convTrans layer and check to see if its output +// matches the given result +void doOneConvtTest(size_t imgSize, size_t output_x, size_t stride, + size_t padding, size_t filter_size, MatrixPtr& result) { + TestConfig configt; + configt.biasSize = 1; + configt.layerConfig.set_type("exconvt"); + configt.layerConfig.set_num_filters(1); + configt.layerConfig.set_partial_sum(1); + configt.layerConfig.set_shared_biases(true); + + configt.inputDefs.push_back({INPUT_DATA, "layer_0", output_x * output_x, + filter_size * filter_size}); + 
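The doOneConvtTest helper and the convTransLayerFwd2 cases further below lean on the caffe-mode size arithmetic being exactly invertible between conv and convTrans. A rough Python sketch of that relationship (it mirrors the imageSize()/outputSize() helpers used by the conv and convTrans layers; it is an editorial illustration, not part of the patch):

.. code-block:: python

    # illustrative only: caffe-mode size helpers for conv and convTrans
    def conv_output_size(img_size, filter_size, padding, stride):
        return (img_size - filter_size + 2 * padding) // stride + 1

    def conv_trans_image_size(output_x, filter_size, padding, stride):
        return (output_x - 1) * stride + filter_size - 2 * padding

    # first convTransLayerFwd2 case below: imgSize=5, filter=5, pad=0, stride=1
    assert conv_output_size(5, 5, 0, 1) == 1
    assert conv_trans_image_size(1, 5, 0, 1) == 5

This inverse relationship is also why convTransLayerFwd above can reuse a conv layer's backward pass to check the convTrans forward pass.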
LayerInputConfig* input = configt.layerConfig.add_inputs(); + ConvConfig* conv = input->mutable_conv_conf(); + conv->set_filter_size(filter_size); + conv->set_filter_size_y(filter_size); + conv->set_channels(1); + conv->set_padding(padding); + conv->set_padding_y(padding); + conv->set_stride(stride); + conv->set_stride_y(stride); + conv->set_groups(1); + conv->set_filter_channels(1); + conv->set_img_size(imgSize); + conv->set_output_x(output_x); + + configt.layerConfig.set_size(conv->img_size() * conv->img_size() * + configt.layerConfig.num_filters()); + configt.layerConfig.set_name("convTrans"); + + std::vector dataLayers; + LayerMap layerMap; + vector datas; + initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans", + 1, false, false); + dataLayers[0]->getOutputValue()->zeroMem(); + dataLayers[0]->getOutputValue()->add(1.0); + + // test layer initialize + std::vector parameters; + LayerPtr convtLayer; + initTestLayer(configt, &layerMap, ¶meters, &convtLayer); + convtLayer->getBiasParameter()->zeroMem(); + convtLayer->getParameters()[0]->zeroMem(); + convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->add(1.0); + convtLayer->forward(PASS_GC); + + checkMatrixEqual(convtLayer->getOutputValue(), result); +} + +TEST(Layer, convTransLayerFwd2) { + size_t imgSize, output_x, stride, padding, filter_size; + MatrixPtr result; + + imgSize = 5; + output_x = 1; + stride = 1; + padding = 0; + filter_size = 5; + result = Matrix::create(1, imgSize * imgSize, false, false); + result->zeroMem(); + result->add(1.0); + doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); + + imgSize = 5; + output_x = 2; + stride = 1; + padding = 0; + filter_size = 4; + float resultData[] = {1, 2, 2, 2, 1, + 2, 4, 4, 4, 2, + 2, 4, 4, 4, 2, + 2, 4, 4, 4, 2, + 1, 2, 2, 2, 1}; + result = Matrix::create(resultData, 1, imgSize * imgSize, false, false); + doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); + + imgSize = 5; + output_x = 2; + stride = 2; + padding = 1; + filter_size = 5; + float resultData2[] = {1, 2, 2, 2, 1, + 2, 4, 4, 4, 2, + 2, 4, 4, 4, 2, + 2, 4, 4, 4, 2, + 1, 2, 2, 2, 1}; + result = Matrix::create(resultData2, 1, imgSize * imgSize, false, false); + doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); + + imgSize = 5; + output_x = 2; + stride = 2; + padding = 0; + filter_size = 3; + float resultData3[] = {1, 1, 2, 1, 1, + 1, 1, 2, 1, 1, + 2, 2, 4, 2, 2, + 1, 1, 2, 1, 1, + 1, 1, 2, 1, 1}; + result = Matrix::create(resultData3, 1, imgSize * imgSize, false, false); + doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 2d28b34999cb0..95c5f774c6cea 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1107,7 +1107,7 @@ def parse_conv(conv, input_layer_name, conv_conf): conv_conf.caffe_mode) -def parse_convt(conv, input_layer_name, conv_conf): +def parse_convt(conv, input_layer_name, conv_conf, num_filters): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y conv_conf.channels = conv.channels @@ -1116,7 +1116,7 @@ def parse_convt(conv, input_layer_name, conv_conf): conv_conf.stride = conv.stride conv_conf.stride_y = conv.stride_y conv_conf.groups = conv.groups - conv_conf.filter_channels = conv.channels / conv.groups + conv_conf.filter_channels = 
num_filters / conv.groups conv_conf.caffe_mode = conv.caffe_mode outputSize = g_layer_map[input_layer_name].size / conv.channels @@ -1126,14 +1126,14 @@ def parse_convt(conv, input_layer_name, conv_conf): config_assert((conv_conf.output_x ** 2) == outputSize, ("Input layer %s: Incorrect input image size %d for input " + "image pixels %d") - % (input_layer_name, conv_conf.img_size, img_pixels)) + % (input_layer_name, conv_conf.output_x, outputSize)) if conv.caffe_mode: conv_conf.img_size = \ (conv_conf.output_x - 1) * conv.stride \ + conv.filter_size - 2 * conv.padding else: conv_conf.img_size = \ - (conv_conf.output_x - 1) * conv.stride \ + (conv_conf.output_x - 2) * conv.stride \ + conv.filter_size - 2 * conv.padding + 1 @@ -1655,7 +1655,7 @@ def __init__( num_filters=None, shared_biases=False, **xargs): - super(ConvLayerBase, self).__init__( + super(ConvTransLayerBase, self).__init__( name, self.layer_type, 0, inputs=inputs, **xargs) if num_filters is not None: @@ -1686,7 +1686,7 @@ def __init__( parse_convt( self.inputs[input_index].conv, input_layer.name, - self.config.inputs[input_index].conv_conf) + self.config.inputs[input_index].conv_conf, num_filters) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) print("output size for %s is %d " % (name, conv_conf.output_x)) @@ -1700,7 +1700,7 @@ def __init__( self.create_bias_parameter(bias, psize, [psize, 1]) def calc_parameter_size(self, conv_conf): - return conv_conf.channels() * conv_conf.filter_channels \ + return conv_conf.channels * conv_conf.filter_channels \ * (conv_conf.filter_size * conv_conf.filter_size_y) @config_layer('exconvt') diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 853df8b83709d..172d45a761ecc 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -36,7 +36,7 @@ "pooling_layer", "lstmemory", "last_seq", "first_seq", "cos_sim", "hsigmoid", "conv_projection", "regression_cost", 'classification_cost', "LayerOutput", - 'img_conv_layer', 'img_pool_layer', 'batch_norm_layer', + 'img_conv_layer', 'img_convTrans_layer', 'img_pool_layer', 'batch_norm_layer', 'img_cmrnorm_layer', 'addto_layer', 'concat_layer', 'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput', 'expand_layer', 'scaling_layer', From aa2cd2ce8f01072a2e604740bd350417c23485c4 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 1 Nov 2016 10:22:00 -0700 Subject: [PATCH 127/180] Refactor ExpandConvTransLayer to share codes with ExpandConvLayer --- paddle/gserver/layers/ConvBaseLayer.cpp | 7 ++ paddle/gserver/layers/ConvBaseLayer.h | 67 +++++++++++++++++++ paddle/gserver/layers/ExpandConvLayer.cpp | 18 ----- paddle/gserver/layers/ExpandConvLayer.h | 19 +----- .../gserver/layers/ExpandConvTransLayer.cpp | 40 ++--------- paddle/gserver/layers/ExpandConvTransLayer.h | 20 ++---- 6 files changed, 83 insertions(+), 88 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 42ff0b70d86f7..4346cb520ea01 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -21,6 +21,12 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, /* Initialize the basic parent class */ Layer::init(layerMap, parameterMap); + if (config_.type() == "exconv" || config_.type() == "cudnn_conv") { + isConv_ = true; + } else { + isConv_ = false; + } + /* Initialize the convolutional layer parameter */ numFilters_ = 
config_.num_filters(); sharedBiases_ = config_.shared_biases(); @@ -88,6 +94,7 @@ size_t ConvBaseLayer::calOutputSize() { getOutput().setFrameWidth(outputW_[0]); layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_); return layerSize; + } } // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index e660a6d6f50ac..24927dec24d0c 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -28,6 +28,9 @@ class ConvBaseLayer : public Layer { protected: typedef std::vector IntV; + /// True if it's convolution layer, false if it's deconv layer + bool isConv_; + /// The number of filters. int numFilters_; /// The x dimension of the padding. @@ -75,6 +78,13 @@ class ConvBaseLayer : public Layer { /// of output size. bool caffeMode_; + /*The expandInput_ and transOutValue_ are used for CPU expand conv calc*/ + /// Expand one sample at a time. shape: + /// (numChannels * filterPixels_, outputSizeH * outputSizeW) + MatrixPtr expandInput_; + /// The transpose of output, which is an auxiliary matrix. + MatrixPtr transOutValue_; + public: explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {} @@ -88,6 +98,63 @@ class ConvBaseLayer : public Layer { virtual size_t calOutputSize(); Weight& getWeight(int idx) { return *weights_[idx]; } + + /** + * Calculate output size based on caffeMode_. + * - input(+padding): 0123456789 + * - imageSize(+padding) = 10; + * - filterSize = 3; + * - stride = 2; + * - caffeMode_ is true: + - output: (012), (234), (456), (678) + - outputSize = 4; + * - caffeMode_ is false: + * - output: (012), (234), (456), (678), (9) + * - outputSize = 5; + */ + int outputSize(int imageSize, int filterSize, int padding, int stride) { + int outputSize; + if (!caffeMode_) { + outputSize = + (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1; + } else { + outputSize = (imageSize - filterSize + 2 * padding) / stride + 1; + } + CHECK_GE(outputSize, 1); + return outputSize; + } + + int imageSize(int outputSize, int filterSize, int padding, int stride) { + int imageSize; + if (!caffeMode_) { + imageSize = + (outputSize - 1) * stride + filterSize - 2 * padding - stride + 1; + } else { + imageSize = (outputSize - 1) * stride + filterSize - 2 * padding; + } + CHECK_GE(imageSize, 1); + return imageSize; + } + + /** + * Create or resize expandInput_. + */ + void resetExpandInput(size_t height, size_t width); + + /** + * Create or resize transOutValue_. + */ + void resetConvOutput(size_t batchSize, int inIdx); + + /** + * Add shared bias. + */ + void addSharedBias(); + + /** + * Add unshared bias. 
+ */ + void addUnsharedBias(); }; } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 80a6a62b5c0de..866cd33c118d0 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -63,14 +63,6 @@ size_t ExpandConvLayer::getOutputSize() { return layerSize; } -void ExpandConvLayer::resetExpandInput(size_t height, size_t width) { - Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); -} - -void ExpandConvLayer::resetConvOutput(size_t batchSize, int inIdx) { - Matrix::resizeOrCreate(transOutValue_, batchSize * numFilters_, subN_[inIdx], - false, useGpu_); -} void ExpandConvLayer::expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx) { @@ -135,17 +127,7 @@ void ExpandConvLayer::addSharedBias() { transOutValue_->reshape(mapW, mapH); transOutValue_->transpose(out, false); // false means no memory allocation - out->clear(); - bias->clear(); -} -void ExpandConvLayer::addUnsharedBias() { - MatrixPtr outValue = getOutputValue(); - MatrixPtr bias = - Matrix::create(biases_->getW()->getData(), 1, - biases_->getW()->getElementCnt(), false, useGpu_); - outValue->addBias(*bias, 1.0f); -} void ExpandConvLayer::forward(PassType passType) { Layer::forward(passType); diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index 030a3ba397ff4..f43b199498ec3 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -43,6 +43,7 @@ class ExpandConvLayer : public ConvBaseLayer { /// The transpose of output, which is an auxiliary matrix. MatrixPtr transOutValue_; + public: explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {} @@ -52,16 +53,6 @@ class ExpandConvLayer : public ConvBaseLayer { size_t getOutputSize(); - /** - * Create or resize expandInput_. - */ - void resetExpandInput(size_t height, size_t width); - - /** - * Create or resize transOutValue_. - */ - void resetConvOutput(size_t batchSize, int inIdx); - /** * Expand one input sample. */ @@ -72,15 +63,7 @@ class ExpandConvLayer : public ConvBaseLayer { */ void expandFwdOnce(MatrixPtr image, int inIdx, int startIdx); - /** - * Add shared bias. - */ - void addSharedBias(); - /** - * Add unshared bias. 
- */ - void addUnsharedBias(); void forward(PassType passType); void bpropSharedBias(MatrixPtr biases, MatrixPtr v); void bpropBiases(MatrixPtr v); diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index 67c045821d173..fb2e7fc4bd6e2 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -29,14 +29,14 @@ REGISTER_LAYER(exconvt, ExpandConvTransLayer); bool ExpandConvTransLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ConvTransBaseLayer::init(layerMap, parameterMap); + ConvBaseLayer::init(layerMap, parameterMap); /* Initialize the projection */ for (auto &inputConfig : config_.inputs()) { const ConvConfig &conf = inputConfig.conv_conf(); subM_.push_back(conf.channels() / conf.groups()); subN_.push_back(conf.output_x() * conf.output_x()); - subK_.push_back(channel_ * conf.filter_size() * conf.filter_size() / + subK_.push_back(numFilters_ * conf.filter_size() * conf.filter_size() / conf.groups()); /* Consistent caffe mode for multiple input */ caffeMode_ = conf.caffe_mode(); @@ -65,8 +65,8 @@ size_t ExpandConvTransLayer::getSize() { imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i])); subN_.push_back(outputH_[i] * outputW_[i]); CHECK(layerSize == 0 || - imgSizeH_[i] * imgSizeW_[i] * (size_t)channel_ == layerSize); - layerSize = imgSizeH_[i] * imgSizeW_[i] * channel_; + imgSizeH_[i] * imgSizeW_[i] * (size_t)numFilters_ == layerSize); + layerSize = imgSizeH_[i] * imgSizeW_[i] * numFilters_; } getOutput().setFrameHeight(imgSizeH_[0]); getOutput().setFrameWidth(imgSizeW_[0]); @@ -83,38 +83,6 @@ void ExpandConvTransLayer::resetExpandInput(size_t height, size_t width) { }*/ -void ExpandConvTransLayer::addSharedBias() { - size_t mapW = getSize() / channel_; - size_t mapH = getOutputValue()->getElementCnt() / mapW; - MatrixPtr out = - Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - out->transpose(transOutValue_, false); // false means no memory allocation - transOutValue_->reshape(transOutValue_->getElementCnt() / channel_, - channel_); - - MatrixPtr bias = - Matrix::create(biases_->getW()->getData(), 1, - biases_->getW()->getElementCnt(), false, useGpu_); - transOutValue_->addBias(*bias, 1.0f); - - transOutValue_->reshape(mapW, mapH); - transOutValue_->transpose(out, false); // false means no memory allocation - - out->clear(); - bias->clear(); -} - -void ExpandConvTransLayer::addUnsharedBias() { - MatrixPtr outValue = getOutputValue(); - MatrixPtr bias = - Matrix::create(biases_->getW()->getData(), 1, - biases_->getW()->getElementCnt(), false, useGpu_); - outValue->addBias(*bias, 1.0f); -} - void ExpandConvTransLayer::expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx) { diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index a6591fe1aa386..cbe4da8143cef 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once -#include "ConvTransBaseLayer.h" +#include "ConvBaseLayer.h" #include "paddle/math/Matrix.h" #include @@ -28,7 +28,7 @@ namespace paddle { * * The config file api is img_convTrans_layer. 
*/ -class ExpandConvTransLayer : public ConvTransBaseLayer { +class ExpandConvTransLayer : public ConvBaseLayer { protected: /// For expand convolution. /// subM_ = numFilters_ / groups_. @@ -45,15 +45,11 @@ class ExpandConvTransLayer : public ConvTransBaseLayer { IntV outputH_; /// The spatial dimensions of width of output feature map. IntV outputW_; - /// Expand one sample at a time. shape: - /// (numChannels * filterPixels_, outputSizeH * outputSizeW) - MatrixPtr expandInput_; - /// The transpose of output, which is an auxiliary matrix. - MatrixPtr transOutValue_; + public: explicit ExpandConvTransLayer(const LayerConfig& config) : - ConvTransBaseLayer(config) {} + ConvBaseLayer(config) {} ~ExpandConvTransLayer() {} @@ -86,15 +82,7 @@ class ExpandConvTransLayer : public ConvTransBaseLayer { */ void shrinkFwd(MatrixPtr output, int inpIdx); - /** - * Add shared bias. - */ - void addSharedBias(); - /** - * Add unshared bias. - */ - void addUnsharedBias(); void forward(PassType passType); void bpropSharedBias(MatrixPtr biases, MatrixPtr v); void bpropBiases(MatrixPtr v); From 2575b74feefd10dde3990452c53e595f83f08d52 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 1 Nov 2016 15:12:17 -0700 Subject: [PATCH 128/180] refactored ExpandConvLayer and ExpandConvTransLayer with ConvBaseLayerCpu --- paddle/gserver/layers/ConvBaseLayer.h | 27 +- paddle/gserver/layers/ConvBaseLayerCpu.cpp | 241 ++++++++++++++++++ paddle/gserver/layers/ConvBaseLayerCpu.h | 91 +++++++ paddle/gserver/layers/ConvTransBaseLayer.cpp | 88 ------- paddle/gserver/layers/ConvTransBaseLayer.h | 117 --------- paddle/gserver/layers/ExpandConvLayer.cpp | 219 +--------------- paddle/gserver/layers/ExpandConvLayer.h | 36 +-- .../gserver/layers/ExpandConvTransLayer.cpp | 195 +------------- paddle/gserver/layers/ExpandConvTransLayer.h | 56 +--- paddle/gserver/tests/test_LayerGrad.cpp | 2 + 10 files changed, 355 insertions(+), 717 deletions(-) create mode 100644 paddle/gserver/layers/ConvBaseLayerCpu.cpp create mode 100644 paddle/gserver/layers/ConvBaseLayerCpu.h delete mode 100644 paddle/gserver/layers/ConvTransBaseLayer.cpp delete mode 100644 paddle/gserver/layers/ConvTransBaseLayer.h diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index 24927dec24d0c..ecdc119a94941 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -78,12 +78,7 @@ class ConvBaseLayer : public Layer { /// of output size. bool caffeMode_; - /*The expandInput_ and transOutValue_ are used for CPU expand conv calc*/ - /// Expand one sample at a time. shape: - /// (numChannels * filterPixels_, outputSizeH * outputSizeW) - MatrixPtr expandInput_; - /// The transpose of output, which is an auxiliary matrix. - MatrixPtr transOutValue_; + public: explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {} @@ -135,26 +130,6 @@ class ConvBaseLayer : public Layer { CHECK_GE(imageSize, 1); return imageSize; } - - /** - * Create or resize expandInput_. - */ - void resetExpandInput(size_t height, size_t width); - - /** - * Create or resize transOutValue_. - */ - void resetConvOutput(size_t batchSize, int inIdx); - - /** - * Add shared bias. - */ - void addSharedBias(); - - /** - * Add unshared bias. 
- */ - void addUnsharedBias(); }; } // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseLayerCpu.cpp b/paddle/gserver/layers/ConvBaseLayerCpu.cpp new file mode 100644 index 0000000000000..0da92bf0485b0 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseLayerCpu.cpp @@ -0,0 +1,241 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#include "paddle/utils/Logging.h" +#include "ConvBaseLayerCpu.h" +namespace paddle { + +bool ConvBaseLayerCpu::init(const LayerMap &layerMap, + const ParameterMap ¶meterMap) { + /* Initialize the basic convolutional parent class */ + ConvBaseLayer::init(layerMap, parameterMap); + + int channel; + /* Initialize the projection */ + for (auto &inputConfig : config_.inputs()) { + const ConvConfig &conf = inputConfig.conv_conf(); + subM_.push_back(numFilters_ / conf.groups()); + subN_.push_back(conf.output_x() * conf.output_x()); + channel = isConv_ ? conf.channels() : numFilters_; + subK_.push_back(channel * conf.filter_size() * conf.filter_size() / + conf.groups()); + /* Consistent caffe mode for multiple input */ + caffeMode_ = conf.caffe_mode(); + } + + return true; +} + +void ConvBaseLayerCpu::resetExpandInput(size_t height, size_t width) { + Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); +} + +void ConvBaseLayerCpu::addSharedBias() { + size_t mapW = getSize() / numFilters_; + size_t mapH = getOutputValue()->getElementCnt() / mapW; + MatrixPtr out = + Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); + + Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); + + out->transpose(transOutValue_, false); // false means no memory allocation + transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_, + numFilters_); + + MatrixPtr bias = + Matrix::create(biases_->getW()->getData(), 1, + biases_->getW()->getElementCnt(), false, useGpu_); + transOutValue_->addBias(*bias, 1.0f); + + transOutValue_->reshape(mapW, mapH); + transOutValue_->transpose(out, false); // false means no memory allocation + + out->clear(); + bias->clear(); +} + +void ConvBaseLayerCpu::addUnsharedBias() { + MatrixPtr outValue = getOutputValue(); + MatrixPtr bias = + Matrix::create(biases_->getW()->getData(), 1, + biases_->getW()->getElementCnt(), false, useGpu_); + outValue->addBias(*bias, 1.0f); +} + + +void ConvBaseLayerCpu::expandOneFrame(MatrixPtr image, size_t startIdx, + int inIdx) { + int channel = isConv_ ? 
channels_[inIdx] : numFilters_; + + resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); + real *imgData = image->getData() + startIdx * image->getWidth(); + MatrixPtr imageTmp = Matrix::create( + imgData, 1, imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel, false, + useGpu_); + expandInput_->convExpand(*imageTmp, imgSizeH_[inIdx], imgSizeW_[inIdx], + channel, filterSize_[inIdx], + filterSize_[inIdx], stride_[inIdx], stride_[inIdx], + padding_[inIdx], padding_[inIdx], + outputH_[inIdx], outputW_[inIdx]); + imageTmp->clear(); +} + +void ConvBaseLayerCpu::expandFwdOnce(MatrixPtr image, MatrixPtr out, + int inIdx, int startIdx) { + int subM = subM_[inIdx]; + int subN = subN_[inIdx]; + int subK = subK_[inIdx]; + + expandOneFrame(image, startIdx, inIdx); + + int nf = isConv_ ? numFilters_ : channels_[inIdx]; + + real *outData = + out->getData() + startIdx * subN * nf; + + real *wgtData = weights_[inIdx]->getW()->getData(); + real *expInData = expandInput_->getData(); + for (int g = 0; g < groups_[inIdx]; ++g) { + MatrixPtr A = + Matrix::create(wgtData, subK, subM, true, useGpu_); // mark transpose + MatrixPtr B = Matrix::create(expInData, subK, subN, false, useGpu_); + MatrixPtr C = Matrix::create(outData, subM, subN, false, useGpu_); + C->mul(A, B, 1, 1); + + A->clear(); + B->clear(); + C->clear(); + wgtData += subK * subM; + expInData += subK * subN; + outData += subM * subN; + } +} + +void ConvBaseLayerCpu::bpropActs(MatrixPtr image, MatrixPtr out, int inpIdx) { + int channel = isConv_ ? channels_[inpIdx] : numFilters_; + + int subM = subM_[inpIdx]; + int subN = subN_[inpIdx]; + int subK = subK_[inpIdx]; + size_t batchSize = image->getHeight(); + MatrixPtr tgtGrad = out; + + /* reset the expand-grad memory */ + resetExpandInput(subK * groups_[inpIdx], subN); + + real *localGradData = image->getData(); + real *tgtGradData = tgtGrad->getData(); + for (size_t n = 0; n < batchSize; n++) { + real *wgtData = weights_[inpIdx]->getW()->getData(); + real *expandInData = expandInput_->getData(); + + for (int g = 0; g < groups_[inpIdx]; g++) { + // create temporary matrix + MatrixPtr C = Matrix::create(expandInData, subK, subN, false, useGpu_); + MatrixPtr B = Matrix::create(localGradData, subM, subN, false, useGpu_); + MatrixPtr A = Matrix::create(wgtData, subK, subM, false, useGpu_); + C->mul(A, B); // mul + + // clear the temporary matrix + A->clear(); + B->clear(); + C->clear(); + + expandInData += subK * subN; + localGradData += subM * subN; + wgtData += subK * subM; + } + + // shrink one frame outGrad + MatrixPtr oneGradTmp = Matrix::create( + expandInput_->getData(), subK * groups_[inpIdx], subN, false, useGpu_); + MatrixPtr vTmp = Matrix::create( + tgtGradData, 1, + imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel, false, + useGpu_); + vTmp->convShrink(*oneGradTmp, imgSizeH_[inpIdx], imgSizeW_[inpIdx], + channel, filterSize_[inpIdx], + filterSize_[inpIdx], stride_[inpIdx], stride_[inpIdx], + padding_[inpIdx], padding_[inpIdx], + outputH_[inpIdx], outputW_[inpIdx], 1.0f, 1.0f); + vTmp->clear(); + oneGradTmp->clear(); + + // move the data-pointer + tgtGradData += imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel; + } +} + +void ConvBaseLayerCpu::bpropWeights(MatrixPtr image, MatrixPtr out, + int inpIdx) { + MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); + + int subM = subM_[inpIdx]; + int subN = subN_[inpIdx]; + int subK = subK_[inpIdx]; + size_t batchSize = image->getHeight(); + resetExpandInput(subK * groups_[inpIdx], subN); + + real *gradData = out->getData(); + + for (size_t n = 0; 
n < batchSize; n++) { // frame by frame + // expand + expandOneFrame(image, n, inpIdx); + real *wGradData = weightGrad->getData(); + real *expandInData = expandInput_->getData(); + + // expand-mul one-group by one + for (int g = 0; g < groups_[inpIdx]; g++) { + MatrixPtr A = Matrix::create(expandInData, subK, subN, false, useGpu_); + MatrixPtr B = Matrix::create(gradData, subM, subN, true, useGpu_); + MatrixPtr C = Matrix::create(wGradData, subK, subM, false, useGpu_); + C->mul(A, B, 1, 1); + + A->clear(); + B->clear(); + C->clear(); + gradData += subM * subN; + wGradData += subK * subM; + expandInData += subK * subN; + } + } +} + +void ConvBaseLayerCpu::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { + size_t mapW = getSize() / numFilters_; + size_t mapH = v->getElementCnt() / mapW; + MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); + + Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); + + vTmp->transpose(transOutValue_, false); // false means no memory allocation + transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_, + numFilters_); + biases->collectBias(*transOutValue_, 1.0f); +} + +void ConvBaseLayerCpu::bpropBiases(MatrixPtr v) { + MatrixPtr biases = + Matrix::create(biases_->getWGrad()->getData(), 1, + biases_->getWGrad()->getElementCnt(), false, useGpu_); + if (sharedBiases_) { + bpropSharedBias(biases, v); + } else { + biases->collectBias(*v, 1.0f); + } + biases->clear(); +} + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvBaseLayerCpu.h b/paddle/gserver/layers/ConvBaseLayerCpu.h new file mode 100644 index 0000000000000..08a1426b473a0 --- /dev/null +++ b/paddle/gserver/layers/ConvBaseLayerCpu.h @@ -0,0 +1,91 @@ +/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + + +#pragma once + +#include "ConvBaseLayer.h" +#include "paddle/math/Matrix.h" +#include + +namespace paddle { + +/** + * @brief A subclass of ConvBaseLayer that is a superclass of both + * ExpandConvLayer and ExpandConvTransLayer + */ +class ConvBaseLayerCpu : public ConvBaseLayer { +protected: + /// For expand convolution. + /// subM_ = numFilters_ / groups_. + IntV subM_; + /// subN_ = outputH_ * outputW_. + IntV subN_; + /// subK_ = channels_ * filterPixels_ * groups_. + IntV subK_; + /// The spatial dimensions of height of input feature map. + IntV imgSizeH_; + /// The spatial dimensions of width of input feature map. + IntV imgSizeW_; + /// The spatial dimensions of height of output feature map. + IntV outputH_; + /// The spatial dimensions of width of output feature map. + IntV outputW_; + + /*The expandInput_ and transOutValue_ are used for CPU expand conv calc*/ + /// Expand one sample at a time. shape: + /// (numChannels * filterPixels_, outputSizeH * outputSizeW) + MatrixPtr expandInput_; + /// The transpose of output, which is an auxiliary matrix. 
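To make the role of the expandInput_ member declared just below concrete: it holds the im2col expansion of one sample, shaped (channels * filterPixels, outputH * outputW). A rough NumPy sketch of that expansion follows; the real Matrix::convExpand kernel may order elements differently, and this is only an illustration, not part of the patch.

.. code-block:: python

    import numpy as np

    def conv_expand(img, img_h, img_w, channels, filter_h, filter_w,
                    stride, padding, out_h, out_w):
        # im2col: each output position becomes one column of height
        # channels * filter_h * filter_w
        img = np.asarray(img).reshape(channels, img_h, img_w)
        padded = np.zeros((channels, img_h + 2 * padding, img_w + 2 * padding))
        padded[:, padding:padding + img_h, padding:padding + img_w] = img
        cols = np.zeros((channels * filter_h * filter_w, out_h * out_w))
        for y in range(out_h):
            for x in range(out_w):
                patch = padded[:, y * stride:y * stride + filter_h,
                               x * stride:x * stride + filter_w]
                cols[:, y * out_w + x] = patch.ravel()
        return cols

With the input in this layout, expandFwdOnce above reduces each group's convolution to a matrix multiplication between the (transposed) weight block and the (subK x subN) expanded input.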
+ MatrixPtr transOutValue_; + +public: + explicit ConvBaseLayerCpu(const LayerConfig& config) + : ConvBaseLayer(config) {} + + ~ConvBaseLayerCpu() {} + + bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + + /** + * Create or resize expandInput_. + */ + void resetExpandInput(size_t height, size_t width); + + /** + * Add shared bias. + */ + void addSharedBias(); + + /** + * Add unshared bias. + */ + void addUnsharedBias(); + /** + * Expand one input sample. + */ + void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx); + + /** + * Expand one input sample and perform matrix multiplication. + */ + void expandFwdOnce(MatrixPtr image, MatrixPtr out, int inIdx, int startIdx); + + void bpropSharedBias(MatrixPtr biases, MatrixPtr v); + void bpropBiases(MatrixPtr v); + void bpropWeights(MatrixPtr image, MatrixPtr out, int inpIdx); + void bpropActs(MatrixPtr image, MatrixPtr out, int inpIdx); +}; + +} // namespace paddle diff --git a/paddle/gserver/layers/ConvTransBaseLayer.cpp b/paddle/gserver/layers/ConvTransBaseLayer.cpp deleted file mode 100644 index 1b58b7fed43d4..0000000000000 --- a/paddle/gserver/layers/ConvTransBaseLayer.cpp +++ /dev/null @@ -1,88 +0,0 @@ -/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - - -#include "paddle/utils/Logging.h" -#include "ConvTransBaseLayer.h" -namespace paddle { - -bool ConvTransBaseLayer::init(const LayerMap& layerMap, - const ParameterMap& parameterMap) { - /* Initialize the basic parent class */ - Layer::init(layerMap, parameterMap); - - /* Initialize the convolutional layer parameter */ - /* Everything is the same as ConvBaseLayer.cpp except that the meaning of - * num_filters and channel is switched. - * - * In the config, num_filters refer to the number of feature maps in the - * output of convTransLayer, and channel refer to the number of feature maps - * in the input of convTransLayer. - * - * However, within the convTrans class, the channel is related to the output - * and num_filters is related to the input, so that it is consistent with the - * settings in convLayer. 
- * */ - channel_ = config_.num_filters(); - sharedBiases_ = config_.shared_biases(); - for (auto& inputConfig : config_.inputs()) { - const ConvConfig& conf = inputConfig.conv_conf(); - padding_.push_back(conf.padding()); - stride_.push_back(conf.stride()); - filterSize_.push_back(conf.filter_size()); - paddingY_.push_back(conf.padding_y()); - strideY_.push_back(conf.stride_y()); - filterSizeY_.push_back(conf.filter_size_y()); - filterPixels_.push_back(filterSize_.back() * filterSizeY_.back()); - numFilters_.push_back(conf.channels()); - imgSize_.push_back(conf.img_size()); - imgPixels_.push_back(imgSize_.back() * imgSize_.back()); - groups_.push_back(conf.groups()); - filterChannels_.push_back(conf.filter_channels()); - outputX_.push_back(conf.output_x()); - outputs_.push_back(outputX_.back() * outputX_.back()); - } - - /* initialize the weightList */ - CHECK(inputLayers_.size() == parameters_.size()); - for (size_t i = 0; i < inputLayers_.size(); i++) { - size_t height, width; - height = filterPixels_[i] * filterChannels_[i]; - width = numFilters_[i]; - - // create a new weight - CHECK_EQ(parameters_[i]->getSize(), width * height); - Weight* w = new Weight(height, width, parameters_[i]); - weights_.emplace_back(w); - } - - /* initialize the biases_ */ - if (biasParameter_.get() != NULL) { - if (sharedBiases_) { - CHECK_EQ((size_t)channel_, biasParameter_->getSize()); - biases_ = - std::unique_ptr(new Weight(channel_, 1, biasParameter_)); - } else { - biases_ = - std::unique_ptr(new Weight(getSize(), 1, biasParameter_)); - } - } - - // default caffe model - caffeMode_ = true; - - return true; -} - -} // namespace paddle diff --git a/paddle/gserver/layers/ConvTransBaseLayer.h b/paddle/gserver/layers/ConvTransBaseLayer.h deleted file mode 100644 index d7acc184cc9ac..0000000000000 --- a/paddle/gserver/layers/ConvTransBaseLayer.h +++ /dev/null @@ -1,117 +0,0 @@ -/* Copyright (c) 2016 Baidu, Inc. All Rights Reserve. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - - -#pragma once - -#include "Layer.h" -namespace paddle { - -/** - * @brief A Base Convolution Layer, which convolves the input image - * with learned filters and (optionally) adds biases. - */ - -class ConvTransBaseLayer : public Layer { -protected: - typedef std::vector IntV; - - /// The number of channel in image (the output of the deconv layer). - int channel_; - /// The x dimension of the padding. - IntV padding_; - /// The y dimension of the padding. - IntV paddingY_; - /// The x dimension of the stride. - IntV stride_; - /// The y dimension of the stride. - IntV strideY_; - /// The x dimension of a filter kernel. - IntV filterSize_; - /// The y dimension of a filter kernel. - IntV filterSizeY_; - /// The number of filters(i.e. the number channels of the deconv layer input) - IntV numFilters_; - /// The spatial dimensions of input feature map. - IntV imgSize_; - /// The total pixel size of input feature map. - /// imgPixels_ = imgSizeX_ * imgSizeY_. - IntV imgPixels_; - /// filterPixels_ = filterSizeX_ * filterSizeY_. 
- IntV filterPixels_; - /// filterChannels_ = channels_/groups_. - IntV filterChannels_; - /// The spatial dimensions of output feature map. - IntV outputX_; - /// The spatial dimensions of output feature map. - IntV outputs_; - /// Group size, refer to grouped convolution in - /// Alex Krizhevsky's paper: when group=2, the first half of the - /// filters are only connected to the first half of the input channels, - /// and the second half only connected to the second half. - IntV groups_; - /// Whether the bias is shared for feature in each channel. - bool sharedBiases_; - - /// shape of weight: (numChannels * filterPixels_, numFilters) - WeightList weights_; - /// If shared_biases is false shape of bias: (numFilters_, 1) - /// If shared_biases is ture shape of bias: - /// (numFilters_ * outputX * outputY, 1) - std::unique_ptr biases_; - - /// True by default. The only difference is the calculation - /// of output size. - bool caffeMode_; - -public: - explicit ConvTransBaseLayer(const LayerConfig& config) : Layer(config) {} - - virtual bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - - Weight& getWeight(int idx) { return *weights_[idx]; } - - /** - * Calculate image size based on caffeMode_ from outputSize. - * - input(+padding): 0123456789 - * - imageSize(+padding) = 10; - * - filterSize = 3; - * - stride = 2; - * - caffeMode_ is true: - - output: (012), (234), (456), (678) - - outputSize = 4; - * - caffeMode_ is false: - * - output: (012), (234), (456), (678), (9) - * - outputSize = 5; - */ - - /* - * In order to be consistent with the convLayer, here the outputSize is - * actually the size of the input image of convTransLayer, and the image size - * is actually the size of the output image of convTransLayer - */ - int imageSize(int outputSize, int filterSize, int padding, int stride) { - int imageSize; - if (!caffeMode_) { - imageSize = - (outputSize - 1) * stride + filterSize - 2 * padding - stride + 1; - } else { - imageSize = (outputSize - 1) * stride + filterSize - 2 * padding; - } - CHECK_GE(imageSize, 1); - return imageSize; - } -}; - -} // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 866cd33c118d0..5c30c5a1fec7b 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -24,32 +24,7 @@ REGISTER_LAYER(exconv, ExpandConvLayer); bool ExpandConvLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ConvBaseLayer::init(layerMap, parameterMap); - - /* Initialize the projection */ - for (auto &inputConfig : config_.inputs()) { - const ConvConfig &conf = inputConfig.conv_conf(); - subM_.push_back(numFilters_ / conf.groups()); - subN_.push_back(conf.output_x() * conf.output_x()); - subK_.push_back(conf.channels() * conf.filter_size() * conf.filter_size() / - conf.groups()); - /* Consistent caffe mode for multiple input */ - caffeMode_ = conf.caffe_mode(); - } - - /* initialize the weightList */ - CHECK(inputLayers_.size() == parameters_.size()); - for (size_t i = 0; i < inputLayers_.size(); i++) { - size_t height, width; - height = filterPixels_[i] * filterChannels_[i]; - width = numFilters_; - - // create a new weight - CHECK_EQ(parameters_[i]->getSize(), width * height); - Weight* w = new Weight(height, width, parameters_[i]); - weights_.emplace_back(w); - } - + ConvBaseLayerCpu::init(layerMap, parameterMap); return true; } @@ -63,72 +38,6 @@ size_t 
ExpandConvLayer::getOutputSize() { return layerSize; } - -void ExpandConvLayer::expandOneFrame(MatrixPtr image, size_t startIdx, - int inIdx) { - resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); - real *imgData = image->getData() + startIdx * image->getWidth(); - MatrixPtr imageTmp = Matrix::create( - imgData, 1, imgSizeH_[inIdx] * imgSizeW_[inIdx] * channels_[inIdx], false, - useGpu_); - expandInput_->convExpand(*imageTmp, imgSizeH_[inIdx], imgSizeW_[inIdx], - channels_[inIdx], filterSize_[inIdx], - filterSize_[inIdx], stride_[inIdx], stride_[inIdx], - padding_[inIdx], padding_[inIdx], - outputH_[inIdx], outputW_[inIdx]); - imageTmp->clear(); -} - -void ExpandConvLayer::expandFwdOnce(MatrixPtr image, int inIdx, int startIdx) { - int subM = subM_[inIdx]; - int subN = subN_[inIdx]; - int subK = subK_[inIdx]; - - expandOneFrame(image, startIdx, inIdx); - - real *outData = - getOutputValue()->getData() + startIdx * subN * numFilters_; - - real *wgtData = weights_[inIdx]->getW()->getData(); - real *expInData = expandInput_->getData(); - for (int g = 0; g < groups_[inIdx]; ++g) { - MatrixPtr A = - Matrix::create(wgtData, subK, subM, true, useGpu_); // mark transpose - MatrixPtr B = Matrix::create(expInData, subK, subN, false, useGpu_); - MatrixPtr C = Matrix::create(outData, subM, subN, false, useGpu_); - C->mul(A, B, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - wgtData += subK * subM; - expInData += subK * subN; - outData += subM * subN; - } -} - -void ExpandConvLayer::addSharedBias() { - size_t mapW = getOutputValue()->getWidth() / numFilters_; - size_t mapH = getOutputValue()->getElementCnt() / mapW; - MatrixPtr out = - Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - out->transpose(transOutValue_, false); // false means no memory allocation - transOutValue_->reshape(transOutValue_->getElementCnt() / numFilters_, - numFilters_); - - MatrixPtr bias = - Matrix::create(biases_->getW()->getData(), 1, - biases_->getW()->getElementCnt(), false, useGpu_); - transOutValue_->addBias(*bias, 1.0f); - - transOutValue_->reshape(mapW, mapH); - transOutValue_->transpose(out, false); // false means no memory allocation - - - void ExpandConvLayer::forward(PassType passType) { Layer::forward(passType); @@ -145,7 +54,7 @@ void ExpandConvLayer::forward(PassType passType) { image = prevLayer->getOutputValue(); for (size_t off = 0; off < image->getHeight(); off++) { REGISTER_TIMER_INFO("expandFwdOnce", getName().c_str()); - expandFwdOnce(image, i, off); + expandFwdOnce(image, getOutputValue(), i, off); } } /* add the bias-vector */ @@ -161,29 +70,6 @@ void ExpandConvLayer::forward(PassType passType) { forwardActivation(); } -void ExpandConvLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { - size_t mapW = v->getWidth() / numFilters_; - size_t mapH = v->getElementCnt() / mapW; - MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - vTmp->transpose(transOutValue_, false); // false means no memory allocation - vTmp->reshape(transOutValue_->getElementCnt() / numFilters_, numFilters_); - biases->collectBias(*vTmp, 1.0f); -} - -void ExpandConvLayer::bpropBiases(MatrixPtr v) { - MatrixPtr biases = - Matrix::create(biases_->getWGrad()->getData(), 1, - biases_->getWGrad()->getElementCnt(), false, useGpu_); - if (sharedBiases_) { - bpropSharedBias(biases, v); - } else { - biases->collectBias(*v, 1.0f); 
- } - biases->clear(); -} void ExpandConvLayer::backward(const UpdateCallback &callback) { backwardActivation(); @@ -197,109 +83,16 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) { for (size_t i = 0; i != inputLayers_.size(); ++i) { /* First, calculate the input layers error */ - bpropActs(outGrad, i); + if (NULL != getPrev(i)->getOutputGrad()) { + bpropActs(outGrad, getPrev(i)->getOutputGrad(), i); + } if (weights_[i]->getWGrad()) { /* Then, calculate the W-gradient for the current layer */ - bpropWeights(outGrad, i); + bpropWeights(getPrev(i)->getOutputValue(), outGrad, i); /* Increasing the number of gradient */ weights_[i]->getParameterPtr()->incUpdate(callback); } } } -void ExpandConvLayer::bpropWeights(MatrixPtr v, int inpIdx) { - MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); - MatrixPtr inputV = getPrev(inpIdx)->getOutputValue(); - - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - size_t batchSize = inputV->getHeight(); - resetExpandInput(subK * groups_[inpIdx], subN); - resetConvOutput(batchSize, inpIdx); - - real *gradData = v->getData(); - - for (size_t n = 0; n < batchSize; n++) { // frame by frame - // expand - expandOneFrame(inputV, n, inpIdx); - real *wGradData = weightGrad->getData(); - real *expandInData = expandInput_->getData(); - - // expand-mul one-group by one - for (int g = 0; g < groups_[inpIdx]; g++) { - MatrixPtr A = Matrix::create(expandInData, subK, subN, false, useGpu_); - MatrixPtr B = Matrix::create(gradData, subM, subN, true, useGpu_); - MatrixPtr C = Matrix::create(wGradData, subK, subM, false, useGpu_); - C->mul(A, B, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - gradData += subM * subN; - wGradData += subK * subM; - expandInData += subK * subN; - } - } -} - -void ExpandConvLayer::bpropActs(MatrixPtr v, int inpIdx) { - LayerPtr prevLayer = getPrev(inpIdx); - if (NULL == prevLayer->getOutputGrad()) { - return; - } - - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - size_t batchSize = v->getHeight(); - MatrixPtr tgtGrad = prevLayer->getOutputGrad(); - - /* reset the expand-grad memory */ - resetExpandInput(subK * groups_[inpIdx], subN); - resetConvOutput(batchSize, inpIdx); - - real *localGradData = v->getData(); - real *tgtGradData = tgtGrad->getData(); - for (size_t n = 0; n < batchSize; n++) { - real *wgtData = weights_[inpIdx]->getW()->getData(); - real *expandInData = expandInput_->getData(); - - for (int g = 0; g < groups_[inpIdx]; g++) { - // create temporary matrix - MatrixPtr C = Matrix::create(expandInData, subK, subN, false, useGpu_); - MatrixPtr B = Matrix::create(localGradData, subM, subN, false, useGpu_); - MatrixPtr A = Matrix::create(wgtData, subK, subM, false, useGpu_); - C->mul(A, B); // mul - - // clear the temporary matrix - A->clear(); - B->clear(); - C->clear(); - - expandInData += subK * subN; - localGradData += subM * subN; - wgtData += subK * subM; - } - - // shrink one frame outGrad - MatrixPtr oneGradTmp = Matrix::create( - expandInput_->getData(), subK * groups_[inpIdx], subN, false, useGpu_); - MatrixPtr vTmp = Matrix::create( - tgtGradData, 1, - imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channels_[inpIdx], false, - useGpu_); - vTmp->convShrink(*oneGradTmp, imgSizeH_[inpIdx], imgSizeW_[inpIdx], - channels_[inpIdx], filterSize_[inpIdx], - filterSize_[inpIdx], stride_[inpIdx], stride_[inpIdx], - padding_[inpIdx], padding_[inpIdx], - outputH_[inpIdx], outputW_[inpIdx], 1.0f, 1.0f); - vTmp->clear(); - oneGradTmp->clear(); - - // move 
the data-pointer - tgtGradData += imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channels_[inpIdx]; - } -} - } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index f43b199498ec3..5a4abec14e7d5 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once -#include "ConvBaseLayer.h" +#include "ConvBaseLayerCpu.h" #include "paddle/math/Matrix.h" #include @@ -28,24 +28,11 @@ namespace paddle { * * The config file api is img_conv_layer. */ -class ExpandConvLayer : public ConvBaseLayer { -protected: - /// For expand convolution. - /// subM_ = numFilters_ / groups_. - IntV subM_; - /// subN_ = outputH_ * outputW_. - IntV subN_; - /// subK_ = channels_ * filterPixels_ * groups_. - IntV subK_; - /// Expand one sample at a time. shape: - /// (numChannels * filterPixels_, outputSizeH * outputSizeW) - MatrixPtr expandInput_; - /// The transpose of output, which is an auxiliary matrix. - MatrixPtr transOutValue_; - +class ExpandConvLayer : public ConvBaseLayerCpu { public: - explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {} + explicit ExpandConvLayer(const LayerConfig& config) : + ConvBaseLayerCpu(config) {} ~ExpandConvLayer() {} @@ -53,23 +40,8 @@ class ExpandConvLayer : public ConvBaseLayer { size_t getOutputSize(); - /** - * Expand one input sample. - */ - void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx); - - /** - * Expand one input sample and perform matrix multiplication. - */ - void expandFwdOnce(MatrixPtr image, int inIdx, int startIdx); - - void forward(PassType passType); - void bpropSharedBias(MatrixPtr biases, MatrixPtr v); - void bpropBiases(MatrixPtr v); void backward(const UpdateCallback& callback); - void bpropWeights(MatrixPtr v, int inpIdx); - void bpropActs(MatrixPtr v, int inpIdx); }; } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index fb2e7fc4bd6e2..99eb18053d32c 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -29,18 +29,7 @@ REGISTER_LAYER(exconvt, ExpandConvTransLayer); bool ExpandConvTransLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ConvBaseLayer::init(layerMap, parameterMap); - - /* Initialize the projection */ - for (auto &inputConfig : config_.inputs()) { - const ConvConfig &conf = inputConfig.conv_conf(); - subM_.push_back(conf.channels() / conf.groups()); - subN_.push_back(conf.output_x() * conf.output_x()); - subK_.push_back(numFilters_ * conf.filter_size() * conf.filter_size() / - conf.groups()); - /* Consistent caffe mode for multiple input */ - caffeMode_ = conf.caffe_mode(); - } + ConvBaseLayerCpu::init(layerMap, parameterMap); return true; } @@ -73,67 +62,6 @@ size_t ExpandConvTransLayer::getSize() { return layerSize; } -void ExpandConvTransLayer::resetExpandInput(size_t height, size_t width) { - Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); -} - -/*void ExpandConvTransLayer::resetConvOutput(size_t batchSize, int inIdx) { - Matrix::resizeOrCreate(transOutValue_, batchSize * numFilters_, subN_[inIdx], - false, useGpu_); -}*/ - - - -void ExpandConvTransLayer::expandOneFrame(MatrixPtr image, size_t startIdx, - int inIdx) { - resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); - real *imgData = 
image->getData() + startIdx * image->getWidth(); - MatrixPtr imageTmp = Matrix::create( - imgData, 1, imgSizeH_[inIdx] * imgSizeW_[inIdx] * channel_, false, - useGpu_); - expandInput_->convExpand(*imageTmp, imgSizeH_[inIdx], imgSizeW_[inIdx], - channel_, filterSize_[inIdx], - filterSize_[inIdx], stride_[inIdx], stride_[inIdx], - padding_[inIdx], padding_[inIdx], - outputH_[inIdx], outputW_[inIdx]); - imageTmp->clear(); -} - -void ExpandConvTransLayer::expandBackOnce(MatrixPtr imageGrad, int inIdx, - int startIdx) { - int subM = subM_[inIdx]; - int subN = subN_[inIdx]; - int subK = subK_[inIdx]; - - LayerPtr prevLayer = getPrev(inIdx); - if (NULL == prevLayer->getOutputGrad()) { - return; - } - - expandOneFrame(imageGrad, startIdx, inIdx); - - real *outGradData = - prevLayer -> getOutputGrad()->getData() - + startIdx * subN * numFilters_[inIdx]; - - real *wgtData = weights_[inIdx]->getW()->getData(); - real *expInData = expandInput_->getData(); - for (int g = 0; g < groups_[inIdx]; ++g) { - MatrixPtr A = - Matrix::create(wgtData, subK, subM, true, useGpu_); // mark transpose - MatrixPtr B = Matrix::create(expInData, subK, subN, false, useGpu_); - MatrixPtr C = Matrix::create(outGradData, subM, subN, false, useGpu_); - C->mul(A, B, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - wgtData += subK * subM; - expInData += subK * subN; - outGradData += subM * subN; - } -} - void ExpandConvTransLayer::forward(PassType passType) { Layer::forward(passType); @@ -148,7 +76,7 @@ void ExpandConvTransLayer::forward(PassType passType) { LayerPtr prevLayer = getPrev(i); output = prevLayer->getOutputValue(); REGISTER_TIMER_INFO("shrinkFwd", getName().c_str()); - shrinkFwd(output, i); + bpropActs(output, getOutputValue(), i); } /* add the bias-vector */ @@ -164,84 +92,6 @@ void ExpandConvTransLayer::forward(PassType passType) { forwardActivation(); } -void ExpandConvTransLayer::shrinkFwd(MatrixPtr output, int inpIdx) { - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - - size_t batchSize = output->getHeight(); - MatrixPtr image = getOutputValue(); - - /* reset the expand-grad memory */ - resetExpandInput(subK * groups_[inpIdx], subN); - - real *localData = output->getData(); - real *imageData = image->getData(); - for (size_t n = 0; n < batchSize; n++) { - real *wgtData = weights_[inpIdx]->getW()->getData(); - real *expandInData = expandInput_->getData(); - - for (int g = 0; g < groups_[inpIdx]; g++) { - // create temporary matrix - MatrixPtr C = Matrix::create(expandInData, subK, subN, false, useGpu_); - MatrixPtr B = Matrix::create(localData, subM, subN, false, useGpu_); - MatrixPtr A = Matrix::create(wgtData, subK, subM, false, useGpu_); - C->mul(A, B); // mul - - // clear the temporary matrix - A->clear(); - B->clear(); - C->clear(); - - expandInData += subK * subN; - localData += subM * subN; - wgtData += subK * subM; - } - - // shrink one frame outGrad - MatrixPtr oneTmp = Matrix::create( - expandInput_->getData(), subK * groups_[inpIdx], subN, false, useGpu_); - MatrixPtr vTmp = Matrix::create( - imageData, 1, - imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel_, false, - useGpu_); - vTmp->convShrink(*oneTmp, imgSizeH_[inpIdx], imgSizeW_[inpIdx], - channel_, filterSize_[inpIdx], - filterSize_[inpIdx], stride_[inpIdx], stride_[inpIdx], - padding_[inpIdx], padding_[inpIdx], - outputH_[inpIdx], outputW_[inpIdx], 1.0f, 1.0f); - vTmp->clear(); - oneTmp->clear(); - - // move the data-pointer - imageData += imgSizeH_[inpIdx] * imgSizeW_[inpIdx] * channel_; - } -} - -void 
ExpandConvTransLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { - size_t mapW = getSize() / channel_; - size_t mapH = v->getElementCnt() / mapW; - MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); - - Matrix::resizeOrCreate(transOutValue_, mapW, mapH, false, useGpu_); - - vTmp->transpose(transOutValue_, false); // false means no memory allocation - vTmp->reshape(transOutValue_->getElementCnt() / channel_, channel_); - biases->collectBias(*vTmp, 1.0f); -} - -void ExpandConvTransLayer::bpropBiases(MatrixPtr v) { - MatrixPtr biases = - Matrix::create(biases_->getWGrad()->getData(), 1, - biases_->getWGrad()->getElementCnt(), false, useGpu_); - if (sharedBiases_) { - bpropSharedBias(biases, v); - } else { - biases->collectBias(*v, 1.0f); - } - biases->clear(); -} - void ExpandConvTransLayer::backward(const UpdateCallback &callback) { backwardActivation(); @@ -255,51 +105,18 @@ void ExpandConvTransLayer::backward(const UpdateCallback &callback) { for (size_t i = 0; i != inputLayers_.size(); ++i) { /* First, calculate the input layers error */ for (size_t off = 0; off < imageGrad->getHeight(); off++) { - expandBackOnce(imageGrad, i, off); + if (NULL != getPrev(i)->getOutputGrad()) { + expandFwdOnce(imageGrad, getPrev(i)->getOutputGrad(), i, off); + } } if (weights_[i]->getWGrad()) { /* Then, calculate the W-gradient for the current layer */ - bpropWeights(imageGrad, i); + bpropWeights(imageGrad, getPrev(i)->getOutputValue(), i); /* Increasing the number of gradient */ weights_[i]->getParameterPtr()->incUpdate(callback); } } } -void ExpandConvTransLayer::bpropWeights(MatrixPtr v, int inpIdx) { - MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); - MatrixPtr outputV = getPrev(inpIdx)->getOutputValue(); - - int subM = subM_[inpIdx]; - int subN = subN_[inpIdx]; - int subK = subK_[inpIdx]; - size_t batchSize = outputV->getHeight(); - resetExpandInput(subK * groups_[inpIdx], subN); - - real *outputData = outputV -> getData(); - - for (size_t n = 0; n < batchSize; n++) { // frame by frame - // expand - expandOneFrame(v, n, inpIdx); - real *wGradData = weightGrad->getData(); - real *expandInData = expandInput_->getData(); - - // expand-mul one-group by one - for (int g = 0; g < groups_[inpIdx]; g++) { - MatrixPtr A = Matrix::create(expandInData, subK, subN, false, useGpu_); - MatrixPtr B = Matrix::create(outputData, subM, subN, true, useGpu_); - MatrixPtr C = Matrix::create(wGradData, subK, subM, false, useGpu_); - C->mul(A, B, 1, 1); - - A->clear(); - B->clear(); - C->clear(); - outputData += subM * subN; - wGradData += subK * subM; - expandInData += subK * subN; - } - } -} - } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index cbe4da8143cef..214f460d65877 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once -#include "ConvBaseLayer.h" +#include "ConvBaseLayerCpu.h" #include "paddle/math/Matrix.h" #include @@ -24,32 +24,14 @@ namespace paddle { /** * @brief A subclass of convolution layer. * This layer expands input and use matrix multiplication to - * calculate convolution operation. + * calculate convolution transpose (deconv) operation. * * The config file api is img_convTrans_layer. */ -class ExpandConvTransLayer : public ConvBaseLayer { -protected: - /// For expand convolution. - /// subM_ = numFilters_ / groups_. - IntV subM_; - /// subN_ = outputH_ * outputW_. 
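For reference, the subM_/subN_/subK_ sizes being moved out of this header define the per-group matrix multiply that the conv and conv-transpose paths share after the refactor. A rough sketch of the shapes involved (names follow the code; the deconv path swaps the roles of numFilters_ and channels_):

    // Shapes of one group's GEMM in expandFwdOnce / bpropActs / bpropWeights (sketch):
    //   weight block   : subK_ x subM_   (subM_ = numFilters_ / groups_, or channels_ / groups_ for deconv)
    //   expanded image : subK_ x subN_   (subN_ = outputH_ * outputW_)
    //   output block   : subM_ x subN_   (subK_ = filterChannels_ * filterPixels_,
    //                                     i.e. channels * filter_size^2 / groups as computed in the .cpp)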
- IntV subN_; - /// subK_ = channels_ * filterPixels_ * groups_. - IntV subK_; - /// The spatial dimensions of height of input feature map. - IntV imgSizeH_; - /// The spatial dimensions of width of input feature map. - IntV imgSizeW_; - /// The spatial dimensions of height of output feature map. - IntV outputH_; - /// The spatial dimensions of width of output feature map. - IntV outputW_; - - +class ExpandConvTransLayer : public ConvBaseLayerCpu { public: explicit ExpandConvTransLayer(const LayerConfig& config) : - ConvBaseLayer(config) {} + ConvBaseLayerCpu(config) {} ~ExpandConvTransLayer() {} @@ -57,38 +39,8 @@ class ExpandConvTransLayer : public ConvBaseLayer { size_t getSize(); - /** - * Create or resize expandInput_. - */ - void resetExpandInput(size_t height, size_t width); - - /** - * Create or resize transOutValue_. - */ - void resetConvOutput(size_t batchSize, int inIdx); - - /** - * Expand one input sample. - */ - void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx); - - /** - * Expand one output image and perform matrix multiplication. - */ - void expandBackOnce(MatrixPtr image, int inIdx, int startIdx); - - /** - * Perform matrix multiplication on one output and then shrink. - */ - void shrinkFwd(MatrixPtr output, int inpIdx); - - void forward(PassType passType); - void bpropSharedBias(MatrixPtr biases, MatrixPtr v); - void bpropBiases(MatrixPtr v); void backward(const UpdateCallback& callback); - void bpropWeights(MatrixPtr v, int inpIdx); - void bpropActs(MatrixPtr v, int inpIdx); }; } // namespace paddle diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index d634d198c3be7..1c27ee2d5f7a3 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -302,6 +302,8 @@ void testConvLayer(const string& type, bool trans, bool useGpu) { config.layerConfig.num_filters()); testLayerGrad(config, "conv", 100, trans, useGpu); + // Use small batch_size and useWeight=true to test biasGrad + testLayerGrad(config, "conv", 2, trans, useGpu, true, 0.02); } TEST(Layer, convLayer) { From e68b50ad0902531d0fc3fa3e9dddb25ad95fd5c3 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 1 Nov 2016 16:36:57 -0700 Subject: [PATCH 129/180] fixed a bug in refactoring ExpandConv/TransLayer --- paddle/gserver/layers/ConvBaseLayerCpu.cpp | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayerCpu.cpp b/paddle/gserver/layers/ConvBaseLayerCpu.cpp index 0da92bf0485b0..13521bcd2b209 100644 --- a/paddle/gserver/layers/ConvBaseLayerCpu.cpp +++ b/paddle/gserver/layers/ConvBaseLayerCpu.cpp @@ -22,11 +22,20 @@ bool ConvBaseLayerCpu::init(const LayerMap &layerMap, /* Initialize the basic convolutional parent class */ ConvBaseLayer::init(layerMap, parameterMap); + /* The class fields channels_ and numFilters_ are the same as in the config + * i.e., channels_ is the for the input and numFilters_ is for the output + * + * But in order for the variables in convTrans having the same semantic + * meaning as in conv, we need to swap channels_ and numFilters here for + * convTrans, and in other functions too. + * */ int channel; + int nf; /* Initialize the projection */ for (auto &inputConfig : config_.inputs()) { const ConvConfig &conf = inputConfig.conv_conf(); - subM_.push_back(numFilters_ / conf.groups()); + nf = isConv_ ? 
numFilters_ : conf.channels(); + subM_.push_back(nf / conf.groups()); subN_.push_back(conf.output_x() * conf.output_x()); channel = isConv_ ? conf.channels() : numFilters_; subK_.push_back(channel * conf.filter_size() * conf.filter_size() / @@ -123,20 +132,19 @@ void ConvBaseLayerCpu::expandFwdOnce(MatrixPtr image, MatrixPtr out, } } -void ConvBaseLayerCpu::bpropActs(MatrixPtr image, MatrixPtr out, int inpIdx) { +void ConvBaseLayerCpu::bpropActs(MatrixPtr out, MatrixPtr image, int inpIdx) { int channel = isConv_ ? channels_[inpIdx] : numFilters_; int subM = subM_[inpIdx]; int subN = subN_[inpIdx]; int subK = subK_[inpIdx]; size_t batchSize = image->getHeight(); - MatrixPtr tgtGrad = out; /* reset the expand-grad memory */ resetExpandInput(subK * groups_[inpIdx], subN); - real *localGradData = image->getData(); - real *tgtGradData = tgtGrad->getData(); + real *localGradData = out->getData(); + real *tgtGradData = image->getData(); for (size_t n = 0; n < batchSize; n++) { real *wgtData = weights_[inpIdx]->getW()->getData(); real *expandInData = expandInput_->getData(); From 5fff96f5322d8275a1683cdf1efdd29b77c7ee20 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 1 Nov 2016 17:01:58 -0700 Subject: [PATCH 130/180] add another small test in test_LayerGrad for convTransLayer --- paddle/gserver/tests/test_LayerGrad.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 1c27ee2d5f7a3..42c7b13906254 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -345,6 +345,8 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) { config.layerConfig.num_filters()); testLayerGrad(config, "convTrans", 100, trans, useGpu); + // Use small batch_size and useWeight=true to test biasGrad + testLayerGrad(config, "convTrans", 2, trans, useGpu, true, 0.02); } TEST(Layer, convTransLayer) { From 5e4cc241ac2cace3f6564070014e4ac59471ab65 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Wed, 2 Nov 2016 11:02:30 -0700 Subject: [PATCH 131/180] Revised deconv implementations according to luotao1 --- paddle/gserver/layers/ConvBaseLayer.cpp | 4 +- paddle/gserver/layers/ConvBaseLayer.h | 4 +- ...seLayerCpu.cpp => ExpandConvBaseLayer.cpp} | 34 ++--- ...nvBaseLayerCpu.h => ExpandConvBaseLayer.h} | 13 +- paddle/gserver/layers/ExpandConvLayer.cpp | 13 +- paddle/gserver/layers/ExpandConvLayer.h | 6 +- .../gserver/layers/ExpandConvTransLayer.cpp | 10 +- paddle/gserver/layers/ExpandConvTransLayer.h | 6 +- python/paddle/trainer/config_parser.py | 4 +- .../paddle/trainer_config_helpers/layers.py | 132 ++---------------- 10 files changed, 62 insertions(+), 164 deletions(-) rename paddle/gserver/layers/{ConvBaseLayerCpu.cpp => ExpandConvBaseLayer.cpp} (88%) rename paddle/gserver/layers/{ConvBaseLayerCpu.h => ExpandConvBaseLayer.h} (90%) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 4346cb520ea01..607fb99cf6288 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -22,9 +22,9 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, Layer::init(layerMap, parameterMap); if (config_.type() == "exconv" || config_.type() == "cudnn_conv") { - isConv_ = true; + isDeconv_ = false; } else { - isConv_ = false; + isDeconv_ = true; } /* Initialize the convolutional layer parameter */ diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index 
ecdc119a94941..2f2ce59ad9e6c 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -28,8 +28,8 @@ class ConvBaseLayer : public Layer { protected: typedef std::vector IntV; - /// True if it's convolution layer, false if it's deconv layer - bool isConv_; + /// True if it's deconv layer, false if it's convolution layer + bool isDeconv_; /// The number of filters. int numFilters_; diff --git a/paddle/gserver/layers/ConvBaseLayerCpu.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp similarity index 88% rename from paddle/gserver/layers/ConvBaseLayerCpu.cpp rename to paddle/gserver/layers/ExpandConvBaseLayer.cpp index 13521bcd2b209..9693ad82450f4 100644 --- a/paddle/gserver/layers/ConvBaseLayerCpu.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp @@ -13,11 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. */ +#include "ExpandConvBaseLayer.h" + #include "paddle/utils/Logging.h" -#include "ConvBaseLayerCpu.h" namespace paddle { -bool ConvBaseLayerCpu::init(const LayerMap &layerMap, +bool ExpandConvBaseLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ ConvBaseLayer::init(layerMap, parameterMap); @@ -34,10 +35,10 @@ bool ConvBaseLayerCpu::init(const LayerMap &layerMap, /* Initialize the projection */ for (auto &inputConfig : config_.inputs()) { const ConvConfig &conf = inputConfig.conv_conf(); - nf = isConv_ ? numFilters_ : conf.channels(); + nf = (!isDeconv_) ? numFilters_ : conf.channels(); subM_.push_back(nf / conf.groups()); subN_.push_back(conf.output_x() * conf.output_x()); - channel = isConv_ ? conf.channels() : numFilters_; + channel = (!isDeconv_) ? conf.channels() : numFilters_; subK_.push_back(channel * conf.filter_size() * conf.filter_size() / conf.groups()); /* Consistent caffe mode for multiple input */ @@ -47,11 +48,11 @@ bool ConvBaseLayerCpu::init(const LayerMap &layerMap, return true; } -void ConvBaseLayerCpu::resetExpandInput(size_t height, size_t width) { +void ExpandConvBaseLayer::resetExpandInput(size_t height, size_t width) { Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); } -void ConvBaseLayerCpu::addSharedBias() { +void ExpandConvBaseLayer::addSharedBias() { size_t mapW = getSize() / numFilters_; size_t mapH = getOutputValue()->getElementCnt() / mapW; MatrixPtr out = @@ -75,7 +76,7 @@ void ConvBaseLayerCpu::addSharedBias() { bias->clear(); } -void ConvBaseLayerCpu::addUnsharedBias() { +void ExpandConvBaseLayer::addUnsharedBias() { MatrixPtr outValue = getOutputValue(); MatrixPtr bias = Matrix::create(biases_->getW()->getData(), 1, @@ -84,9 +85,9 @@ void ConvBaseLayerCpu::addUnsharedBias() { } -void ConvBaseLayerCpu::expandOneFrame(MatrixPtr image, size_t startIdx, +void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx) { - int channel = isConv_ ? channels_[inIdx] : numFilters_; + int channel = (!isDeconv_) ? 
channels_[inIdx] : numFilters_; resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); real *imgData = image->getData() + startIdx * image->getWidth(); @@ -101,7 +102,7 @@ void ConvBaseLayerCpu::expandOneFrame(MatrixPtr image, size_t startIdx, imageTmp->clear(); } -void ConvBaseLayerCpu::expandFwdOnce(MatrixPtr image, MatrixPtr out, +void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, MatrixPtr out, int inIdx, int startIdx) { int subM = subM_[inIdx]; int subN = subN_[inIdx]; @@ -109,7 +110,7 @@ void ConvBaseLayerCpu::expandFwdOnce(MatrixPtr image, MatrixPtr out, expandOneFrame(image, startIdx, inIdx); - int nf = isConv_ ? numFilters_ : channels_[inIdx]; + int nf = (!isDeconv_) ? numFilters_ : channels_[inIdx]; real *outData = out->getData() + startIdx * subN * nf; @@ -132,8 +133,9 @@ void ConvBaseLayerCpu::expandFwdOnce(MatrixPtr image, MatrixPtr out, } } -void ConvBaseLayerCpu::bpropActs(MatrixPtr out, MatrixPtr image, int inpIdx) { - int channel = isConv_ ? channels_[inpIdx] : numFilters_; +void ExpandConvBaseLayer::bpropActs(MatrixPtr out, MatrixPtr image, + int inpIdx) { + int channel = (!isDeconv_) ? channels_[inpIdx] : numFilters_; int subM = subM_[inpIdx]; int subN = subN_[inpIdx]; @@ -186,7 +188,7 @@ void ConvBaseLayerCpu::bpropActs(MatrixPtr out, MatrixPtr image, int inpIdx) { } } -void ConvBaseLayerCpu::bpropWeights(MatrixPtr image, MatrixPtr out, +void ExpandConvBaseLayer::bpropWeights(MatrixPtr image, MatrixPtr out, int inpIdx) { MatrixPtr weightGrad = weights_[inpIdx]->getWGrad(); @@ -221,7 +223,7 @@ void ConvBaseLayerCpu::bpropWeights(MatrixPtr image, MatrixPtr out, } } -void ConvBaseLayerCpu::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { +void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { size_t mapW = getSize() / numFilters_; size_t mapH = v->getElementCnt() / mapW; MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); @@ -234,7 +236,7 @@ void ConvBaseLayerCpu::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { biases->collectBias(*transOutValue_, 1.0f); } -void ConvBaseLayerCpu::bpropBiases(MatrixPtr v) { +void ExpandConvBaseLayer::bpropBiases(MatrixPtr v) { MatrixPtr biases = Matrix::create(biases_->getWGrad()->getData(), 1, biases_->getWGrad()->getElementCnt(), false, useGpu_); diff --git a/paddle/gserver/layers/ConvBaseLayerCpu.h b/paddle/gserver/layers/ExpandConvBaseLayer.h similarity index 90% rename from paddle/gserver/layers/ConvBaseLayerCpu.h rename to paddle/gserver/layers/ExpandConvBaseLayer.h index 08a1426b473a0..418c9dd6ce2ad 100644 --- a/paddle/gserver/layers/ConvBaseLayerCpu.h +++ b/paddle/gserver/layers/ExpandConvBaseLayer.h @@ -25,7 +25,7 @@ namespace paddle { * @brief A subclass of ConvBaseLayer that is a superclass of both * ExpandConvLayer and ExpandConvTransLayer */ -class ConvBaseLayerCpu : public ConvBaseLayer { +class ExpandConvBaseLayer : public ConvBaseLayer { protected: /// For expand convolution. /// subM_ = numFilters_ / groups_. @@ -43,18 +43,19 @@ class ConvBaseLayerCpu : public ConvBaseLayer { /// The spatial dimensions of width of output feature map. IntV outputW_; - /*The expandInput_ and transOutValue_ are used for CPU expand conv calc*/ - /// Expand one sample at a time. shape: - /// (numChannels * filterPixels_, outputSizeH * outputSizeW) + /*The expandInput_ and transOutValue_ are used for CPU expand conv calc + * Expand one sample at a time. 
shape: + * (numChannels * filterPixels_, outputSizeH * outputSizeW) + * */ MatrixPtr expandInput_; /// The transpose of output, which is an auxiliary matrix. MatrixPtr transOutValue_; public: - explicit ConvBaseLayerCpu(const LayerConfig& config) + explicit ExpandConvBaseLayer(const LayerConfig& config) : ConvBaseLayer(config) {} - ~ConvBaseLayerCpu() {} + ~ExpandConvBaseLayer() {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 5c30c5a1fec7b..379823a6feb45 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -24,7 +24,7 @@ REGISTER_LAYER(exconv, ExpandConvLayer); bool ExpandConvLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ConvBaseLayerCpu::init(layerMap, parameterMap); + ExpandConvBaseLayer::init(layerMap, parameterMap); return true; } @@ -49,16 +49,17 @@ void ExpandConvLayer::forward(PassType passType) { resetOutput(batchSize, getOutputSize()); MatrixPtr image = nullptr; - for (size_t i = 0; i != inputLayers_.size(); ++i) { + MatrixPtr outV = getOutputValue(); + for (size_t i = 0; i < inputLayers_.size(); ++i) { LayerPtr prevLayer = getPrev(i); image = prevLayer->getOutputValue(); for (size_t off = 0; off < image->getHeight(); off++) { REGISTER_TIMER_INFO("expandFwdOnce", getName().c_str()); - expandFwdOnce(image, getOutputValue(), i, off); + expandFwdOnce(image, outV, i, off); } } /* add the bias-vector */ - if (biases_.get() != NULL) { + if (biases_.get()) { if (sharedBiases_) { addSharedBias(); } else { @@ -81,9 +82,9 @@ void ExpandConvLayer::backward(const UpdateCallback &callback) { biases_->getParameterPtr()->incUpdate(callback); } - for (size_t i = 0; i != inputLayers_.size(); ++i) { + for (size_t i = 0; i < inputLayers_.size(); ++i) { /* First, calculate the input layers error */ - if (NULL != getPrev(i)->getOutputGrad()) { + if (getPrev(i)->getOutputGrad()) { bpropActs(outGrad, getPrev(i)->getOutputGrad(), i); } if (weights_[i]->getWGrad()) { diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index 5a4abec14e7d5..b5cb448bdfcde 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -15,9 +15,9 @@ limitations under the License. */ #pragma once -#include "ConvBaseLayerCpu.h" #include "paddle/math/Matrix.h" #include +#include "ExpandConvBaseLayer.h" namespace paddle { @@ -29,10 +29,10 @@ namespace paddle { * The config file api is img_conv_layer. 
*/ -class ExpandConvLayer : public ConvBaseLayerCpu { +class ExpandConvLayer : public ExpandConvBaseLayer { public: explicit ExpandConvLayer(const LayerConfig& config) : - ConvBaseLayerCpu(config) {} + ExpandConvBaseLayer(config) {} ~ExpandConvLayer() {} diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index 99eb18053d32c..1d630a4ecd031 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -29,7 +29,7 @@ REGISTER_LAYER(exconvt, ExpandConvTransLayer); bool ExpandConvTransLayer::init(const LayerMap &layerMap, const ParameterMap ¶meterMap) { /* Initialize the basic convolutional parent class */ - ConvBaseLayerCpu::init(layerMap, parameterMap); + ExpandConvBaseLayer::init(layerMap, parameterMap); return true; } @@ -72,7 +72,7 @@ void ExpandConvTransLayer::forward(PassType passType) { resetOutput(batchSize, getSize()); MatrixPtr output = nullptr; - for (size_t i = 0; i != inputLayers_.size(); ++i) { + for (size_t i = 0; i < inputLayers_.size(); ++i) { LayerPtr prevLayer = getPrev(i); output = prevLayer->getOutputValue(); REGISTER_TIMER_INFO("shrinkFwd", getName().c_str()); @@ -80,7 +80,7 @@ void ExpandConvTransLayer::forward(PassType passType) { } /* add the bias-vector */ - if (biases_.get() != NULL) { + if (biases_.get()) { if (sharedBiases_) { addSharedBias(); } else { @@ -102,10 +102,10 @@ void ExpandConvTransLayer::backward(const UpdateCallback &callback) { biases_->getParameterPtr()->incUpdate(callback); } - for (size_t i = 0; i != inputLayers_.size(); ++i) { + for (size_t i = 0; i < inputLayers_.size(); ++i) { /* First, calculate the input layers error */ for (size_t off = 0; off < imageGrad->getHeight(); off++) { - if (NULL != getPrev(i)->getOutputGrad()) { + if (getPrev(i)->getOutputGrad()) { expandFwdOnce(imageGrad, getPrev(i)->getOutputGrad(), i, off); } } diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index 214f460d65877..ebcb34f073494 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -15,9 +15,9 @@ limitations under the License. */ #pragma once -#include "ConvBaseLayerCpu.h" #include "paddle/math/Matrix.h" #include +#include "ExpandConvBaseLayer.h" namespace paddle { @@ -28,10 +28,10 @@ namespace paddle { * * The config file api is img_convTrans_layer. 
*/ -class ExpandConvTransLayer : public ConvBaseLayerCpu { +class ExpandConvTransLayer : public ExpandConvBaseLayer { public: explicit ExpandConvTransLayer(const LayerConfig& config) : - ConvBaseLayerCpu(config) {} + ExpandConvBaseLayer(config) {} ~ExpandConvTransLayer() {} diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 95c5f774c6cea..b17ec6c6f6bf7 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1107,7 +1107,7 @@ def parse_conv(conv, input_layer_name, conv_conf): conv_conf.caffe_mode) -def parse_convt(conv, input_layer_name, conv_conf, num_filters): +def parse_conv_trans(conv, input_layer_name, conv_conf, num_filters): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y conv_conf.channels = conv.channels @@ -1683,7 +1683,7 @@ def __init__( for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_convt( + parse_conv_trans( self.inputs[input_index].conv, input_layer.name, self.config.inputs[input_index].conv_conf, num_filters) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 172d45a761ecc..711c9ca993a23 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1515,7 +1515,8 @@ def img_conv_layer(input, filter_size, num_filters, name=None, num_channels=None, act=None, groups=1, stride=1, padding=0, bias_attr=None, param_attr=None, shared_biases=True, layer_attr=None, - filter_size_y=None, stride_y=None, padding_y=None): + filter_size_y=None, stride_y=None, padding_y=None, + trans=False): """ Convolution layer for image. Paddle only support square input currently and thus input image's width equals height. @@ -1523,120 +1524,7 @@ def img_conv_layer(input, filter_size, num_filters, The details of convolution layer, please refer UFLDL's `convolution `_ . - - The num_channel means input image's channel number. It may be 1 or 3 when - input is raw pixels of image(mono or RGB), or it may be the previous layer's - num_filters * num_group. - - There are several group of filter in PaddlePaddle implementation. - Each group will process some channel of the inputs. For example, if an input - num_channel = 256, group = 4, num_filter=32, the PaddlePaddle will create - 32*4 = 128 filters to process inputs. The channels will be split into 4 - pieces. First 256/4 = 64 channels will process by first 32 filters. The - rest channels will be processed by rest group of filters. - - :param name: Layer name. - :type name: basestring - :param input: Layer Input. - :type input: LayerOutput - :param filter_size: The x dimension of a filter kernel. Or input a tuple for - two image dimension. - :type filter_size: int|tuple|list - :param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle - currently supports rectangular filters, the filter's - shape will be (filter_size, filter_size_y). - :type filter_size_y: int|None - :param num_filters: Each filter group's number of filter - :param act: Activation type. Default is tanh - :type act: BaseActivation - :param groups: Group size of filters. - :type groups: int - :param stride: The x dimension of the stride. Or input a tuple for two image - dimension. - :type stride: int|tuple|list - :param stride_y: The y dimension of the stride. - :type stride_y: int - :param padding: The x dimension of the padding. 
Or input a tuple for two - image dimension - :type padding: int|tuple|list - :param padding_y: The y dimension of the padding. - :type padding_y: int - :param bias_attr: Convolution bias attribute. None means default bias. - False means no bias. - :type bias_attr: ParameterAttribute|False - :param num_channels: number of input channels. If None will be set - automatically from previous output. - :type num_channels: int - :param param_attr: Convolution param attribute. None means default attribute - :type param_attr: ParameterAttribute - :param shared_biases: Is biases will be shared between filters or not. - :type shared_biases: bool - :param layer_attr: Layer Extra Attribute. - :type layer_attr: ExtraLayerAttribute - :return: LayerOutput object. - :rtype: LayerOutput - """ - if num_channels is None: - assert input.num_filters is not None - num_channels = input.num_filters - - if filter_size_y is None: - if isinstance(filter_size, collections.Sequence): - assert len(filter_size) == 2 - filter_size, filter_size_y = filter_size - else: - filter_size_y = filter_size - - if stride_y is None: - if isinstance(stride, collections.Sequence): - assert len(stride) == 2 - stride, stride_y = stride - else: - stride_y = stride - - if padding_y is None: - if isinstance(padding, collections.Sequence): - assert len(padding) == 2 - padding, padding_y = padding - else: - padding_y = padding - - if param_attr.attr.get('initial_smart'): - # special initial for conv layers. - init_w = (2.0 / (filter_size ** 2 * num_channels)) ** 0.5 - param_attr.attr["initial_mean"] = 0.0 - param_attr.attr["initial_std"] = init_w - param_attr.attr["initial_strategy"] = 0 - param_attr.attr["initial_smart"] = False - Layer( - name=name, - inputs=Input(input.name, conv=Conv( - filter_size=filter_size, padding=padding, stride=stride, - channels=num_channels, groups=groups, - filter_size_y=filter_size_y, padding_y=padding_y, - stride_y=stride_y), - **param_attr.attr), - active_type=act.name, - num_filters=num_filters, - bias=ParamAttr.to_bias(bias_attr), - shared_biases=shared_biases, - type=LayerType.CONV_LAYER, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.CONV_LAYER, parents=[input], - activation=act, num_filters=num_filters) - -@wrap_name_default("convt") -@wrap_param_attr_default() -@wrap_bias_attr_default() -@wrap_act_default(act=ReluActivation()) -@layer_support(DROPOUT) -def img_convTrans_layer(input, filter_size, num_filters, - name=None, num_channels=None, - act=None, groups=1, stride=1, padding=0, bias_attr=None, - param_attr=None, shared_biases=True, layer_attr=None, - filter_size_y=None, stride_y=None, padding_y=None): - """ + Convolution Transpose (deconv) layer for image. Paddle only support square input currently and thus input image's width equals height. @@ -1644,7 +1532,6 @@ def img_convTrans_layer(input, filter_size, num_filters, please refer to the following explanation and references therein `_ . - The num_channel means input image's channel number. It may be 1 or 3 when input is raw pixels of image(mono or RGB), or it may be the previous layer's num_filters * num_group. @@ -1694,6 +1581,8 @@ def img_convTrans_layer(input, filter_size, num_filters, :type shared_biases: bool :param layer_attr: Layer Extra Attribute. :type layer_attr: ExtraLayerAttribute + :param trans: true if it is a convTransLayer, false if it is a convLayer + :type trans: bool :return: LayerOutput object. 
:rtype: LayerOutput """ @@ -1729,6 +1618,12 @@ def img_convTrans_layer(input, filter_size, num_filters, param_attr.attr["initial_std"] = init_w param_attr.attr["initial_strategy"] = 0 param_attr.attr["initial_smart"] = False + + if trans: + lt = LayerType.CONVTRANS_LAYER + else: + lt = LayerType.CONV_LAYER + Layer( name=name, inputs=Input(input.name, conv=Conv( @@ -1741,14 +1636,13 @@ def img_convTrans_layer(input, filter_size, num_filters, num_filters=num_filters, bias=ParamAttr.to_bias(bias_attr), shared_biases=shared_biases, - type=LayerType.CONVTRANS_LAYER, + type=lt, **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.CONVTRANS_LAYER, parents=[input], + return LayerOutput(name, lt, parents=[input], activation=act, num_filters=num_filters) - @wrap_name_default("pool") @layer_support() def img_pool_layer(input, pool_size, name=None, From 3d72e94939c2d5afbbca8fa89c310d239d6c0a6c Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Wed, 2 Nov 2016 16:10:54 -0700 Subject: [PATCH 132/180] rebase deconv implementation with develop branch and resolve conflicts with pull#218 commit 45c81a414f9 --- paddle/gserver/layers/ConvBaseLayer.cpp | 69 ++++++++++++++----- paddle/gserver/layers/ExpandConvBaseLayer.cpp | 16 ++++- paddle/gserver/layers/ExpandConvBaseLayer.h | 9 +-- paddle/gserver/layers/ExpandConvLayer.cpp | 10 --- paddle/gserver/layers/ExpandConvLayer.h | 2 - .../gserver/layers/ExpandConvTransLayer.cpp | 30 +------- paddle/gserver/layers/ExpandConvTransLayer.h | 2 - paddle/gserver/tests/test_ConvTrans.cpp | 8 +-- python/paddle/trainer/config_parser.py | 12 ++-- 9 files changed, 78 insertions(+), 80 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 607fb99cf6288..5bc22f477932c 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -48,8 +48,20 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, outputW_.push_back(conf.output_x()); } + CHECK(inputLayers_.size() == parameters_.size()); + for (size_t i = 0; i < inputLayers_.size(); i++) { + size_t height, width; + height = filterPixels_[i] * filterChannels_[i]; + width = (!isDeconv_) ? 
numFilters_ : channels_[i]; + + // create a new weight + CHECK_EQ(parameters_[i]->getSize(), width * height); + Weight* w = new Weight(height, width, parameters_[i]); + weights_.emplace_back(w); + } + /* initialize the biases_ */ - if (biasParameter_.get() != NULL) { + if (biasParameter_.get()) { if (sharedBiases_) { CHECK_EQ((size_t)numFilters_, biasParameter_->getSize()); biases_ = @@ -76,25 +88,46 @@ size_t ConvBaseLayer::calOutputSize() { clearAndReserve(&outputH_); clearAndReserve(&outputW_); size_t layerSize = 0; - for (size_t i = 0; i < inputLayers_.size(); i++) { - imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); - imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); - if (imgSizeH_[i] == 0) - imgSizeH_[i] = config_.inputs(i).conv_conf().img_size(); - if (imgSizeW_[i] == 0) - imgSizeW_[i] = config_.inputs(i).conv_conf().img_size(); - outputH_.push_back(outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], - strideY_[i], caffeMode_)); - outputW_.push_back(outputSize(imgSizeW_[i], filterSize_[i], padding_[i], - stride_[i], caffeMode_)); - CHECK_EQ(outputH_[i], outputH_[0]); - CHECK_EQ(outputW_[i], outputW_[0]); + + if (!isDeconv_) { + for (size_t i = 0; i < inputLayers_.size(); i++) { + imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); + imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); + if (imgSizeH_[i] == 0) + imgSizeH_[i] = config_.inputs(i).conv_conf().img_size(); + if (imgSizeW_[i] == 0) + imgSizeW_[i] = config_.inputs(i).conv_conf().img_size(); + outputH_.push_back( + outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + outputW_.push_back( + outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i])); + CHECK_EQ(outputH_[i], outputH_[0]); + CHECK_EQ(outputW_[i], outputW_[0]); + } + getOutput().setFrameHeight(outputH_[0]); + getOutput().setFrameWidth(outputW_[0]); + layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_); + } else { + for (size_t i = 0; i < inputLayers_.size(); i++) { + outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); + outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); + if (outputH_[i] == 0) + outputH_[i] = config_.inputs(i).conv_conf().output_x(); + if (outputW_[i] == 0) + outputW_[i] = config_.inputs(i).conv_conf().output_x(); + imgSizeH_.push_back( + imageSize(outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + imgSizeW_.push_back( + imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i])); + CHECK_EQ(imgSizeH_[i], imgSizeH_[0]); + CHECK_EQ(imgSizeW_[i], imgSizeW_[0]); + } + getOutput().setFrameHeight(imgSizeH_[0]); + getOutput().setFrameWidth(imgSizeW_[0]); + layerSize = imgSizeH_[0] * imgSizeW_[0] * size_t(numFilters_); } - getOutput().setFrameHeight(outputH_[0]); - getOutput().setFrameWidth(outputW_[0]); - layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_); - return layerSize; + return layerSize; } } // namespace paddle diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp index 9693ad82450f4..75ac8245d8829 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp @@ -45,15 +45,27 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap, caffeMode_ = conf.caffe_mode(); } + getOutputSize(); + return true; } +size_t ExpandConvBaseLayer::getOutputSize() { + CHECK_NE(inputLayers_.size(), 0UL); + size_t layerSize = ConvBaseLayer::calOutputSize(); + subN_.clear(); + for (size_t i = 0; i < 
inputLayers_.size(); i++) { + subN_.push_back(outputH_[i] * outputW_[i]); + } + return layerSize; +} + void ExpandConvBaseLayer::resetExpandInput(size_t height, size_t width) { Matrix::resizeOrCreate(expandInput_, height, width, false, useGpu_); } void ExpandConvBaseLayer::addSharedBias() { - size_t mapW = getSize() / numFilters_; + size_t mapW = getOutputSize() / numFilters_; size_t mapH = getOutputValue()->getElementCnt() / mapW; MatrixPtr out = Matrix::create(getOutputValue()->getData(), mapH, mapW, false, useGpu_); @@ -224,7 +236,7 @@ void ExpandConvBaseLayer::bpropWeights(MatrixPtr image, MatrixPtr out, } void ExpandConvBaseLayer::bpropSharedBias(MatrixPtr biases, MatrixPtr v) { - size_t mapW = getSize() / numFilters_; + size_t mapW = getOutputSize() / numFilters_; size_t mapH = v->getElementCnt() / mapW; MatrixPtr vTmp = Matrix::create(v->getData(), mapH, mapW, false, useGpu_); diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.h b/paddle/gserver/layers/ExpandConvBaseLayer.h index 418c9dd6ce2ad..9858fa348c3fc 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.h +++ b/paddle/gserver/layers/ExpandConvBaseLayer.h @@ -34,14 +34,6 @@ class ExpandConvBaseLayer : public ConvBaseLayer { IntV subN_; /// subK_ = channels_ * filterPixels_ * groups_. IntV subK_; - /// The spatial dimensions of height of input feature map. - IntV imgSizeH_; - /// The spatial dimensions of width of input feature map. - IntV imgSizeW_; - /// The spatial dimensions of height of output feature map. - IntV outputH_; - /// The spatial dimensions of width of output feature map. - IntV outputW_; /*The expandInput_ and transOutValue_ are used for CPU expand conv calc * Expand one sample at a time. shape: @@ -59,6 +51,7 @@ class ExpandConvBaseLayer : public ConvBaseLayer { bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); + size_t getOutputSize(); /** * Create or resize expandInput_. */ diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 379823a6feb45..9f30fcf00a422 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -28,16 +28,6 @@ bool ExpandConvLayer::init(const LayerMap &layerMap, return true; } -size_t ExpandConvLayer::getOutputSize() { - CHECK_NE(inputLayers_.size(), 0UL); - size_t layerSize = ConvBaseLayer::calOutputSize(); - subN_.clear(); - for (size_t i = 0; i < inputLayers_.size(); i++) { - subN_.push_back(outputH_[i] * outputW_[i]); - } - return layerSize; -} - void ExpandConvLayer::forward(PassType passType) { Layer::forward(passType); diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/gserver/layers/ExpandConvLayer.h index b5cb448bdfcde..c07188a406183 100644 --- a/paddle/gserver/layers/ExpandConvLayer.h +++ b/paddle/gserver/layers/ExpandConvLayer.h @@ -38,8 +38,6 @@ class ExpandConvLayer : public ExpandConvBaseLayer { bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - size_t getOutputSize(); - void forward(PassType passType); void backward(const UpdateCallback& callback); }; diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index 1d630a4ecd031..4c4016c30168f 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -34,34 +34,6 @@ bool ExpandConvTransLayer::init(const LayerMap &layerMap, return true; } -// Why this is necessary after calling init? 
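With the removal below, the convTrans-specific size bookkeeping is handled by ExpandConvBaseLayer::getOutputSize() on top of ConvBaseLayer::calOutputSize(), which for the deconv path recovers the image size from the input's output_x instead of the other way around. In caffe mode the two relations are simply inverses of each other; roughly:

    // conv:   outputSize = (imgSize - filterSize + 2 * padding) / stride + 1
    // deconv: imgSize    = (outputSize - 1) * stride + filterSize - 2 * padding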
-size_t ExpandConvTransLayer::getSize() { - CHECK_NE(inputLayers_.size(), 0UL); - imgSizeH_.clear(); - imgSizeW_.clear(); - outputH_.clear(); - outputW_.clear(); - subN_.clear(); - size_t layerSize = 0; - for (size_t i = 0; i < inputLayers_.size(); i++) { - outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); - outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); - if (outputH_[i] == 0) outputH_[i] = outputX_[i]; - if (outputW_[i] == 0) outputW_[i] = outputX_[i]; - imgSizeH_.push_back( - imageSize(outputH_[i], filterSize_[i], padding_[i], stride_[i])); - imgSizeW_.push_back( - imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i])); - subN_.push_back(outputH_[i] * outputW_[i]); - CHECK(layerSize == 0 || - imgSizeH_[i] * imgSizeW_[i] * (size_t)numFilters_ == layerSize); - layerSize = imgSizeH_[i] * imgSizeW_[i] * numFilters_; - } - getOutput().setFrameHeight(imgSizeH_[0]); - getOutput().setFrameWidth(imgSizeW_[0]); - return layerSize; -} - void ExpandConvTransLayer::forward(PassType passType) { Layer::forward(passType); @@ -69,7 +41,7 @@ void ExpandConvTransLayer::forward(PassType passType) { /* note: one sample correspond to one colum, and the * transOutValue correspond sample to one row */ int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); - resetOutput(batchSize, getSize()); + resetOutput(batchSize, getOutputSize()); MatrixPtr output = nullptr; for (size_t i = 0; i < inputLayers_.size(); ++i) { diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index ebcb34f073494..d0c0469c351aa 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -37,8 +37,6 @@ class ExpandConvTransLayer : public ExpandConvBaseLayer { bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); - size_t getSize(); - void forward(PassType passType); void backward(const UpdateCallback& callback); }; diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 787113d242391..756faf26516fe 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -43,11 +43,11 @@ TEST(Layer, convTransLayerFwd) { configt.layerConfig.set_partial_sum(1); configt.layerConfig.set_shared_biases(true); - configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 288}); + configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384}); LayerInputConfig* input = configt.layerConfig.add_inputs(); ConvConfig* conv = input->mutable_conv_conf(); conv->set_filter_size(2); - conv->set_filter_size_y(3); + conv->set_filter_size_y(4); conv->set_channels(16); conv->set_padding(0); conv->set_padding_y(1); @@ -86,11 +86,11 @@ TEST(Layer, convTransLayerFwd) { config.layerConfig.set_partial_sum(1); config.layerConfig.set_shared_biases(true); - config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 288}); + config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384}); input = config.layerConfig.add_inputs(); conv = input->mutable_conv_conf(); conv->set_filter_size(2); - conv->set_filter_size_y(3); + conv->set_filter_size_y(4); conv->set_channels(3); conv->set_padding(0); conv->set_padding_y(1); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index b17ec6c6f6bf7..b3d17a47a965f 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1670,11 +1670,13 @@ def __init__( if self.layer_type == "cudnn_convt": config_assert(use_gpu, 
"cudnn_convt only support GPU") - if (use_gpu == 1 and self.layer_type != "exconvt" and - (parallel_nn == 0 or self.config.device > -1)): - self.layer_type = "cudnn_convt" - else: - self.layer_type = "exconvt" +# if (use_gpu == 1 and self.layer_type != "exconvt" and +# (parallel_nn == 0 or self.config.device > -1)): +# self.layer_type = "cudnn_convt" +# else: +# self.layer_type = "exconvt" + # cudnn_convt has not been implemented so use exconvt only + self.layer_type = "exconvt" # need to specify layer in config self.config.type = self.layer_type From fb20187aaa30b7c049c1dee2ad62bde7005b5af8 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 3 Nov 2016 11:25:34 -0700 Subject: [PATCH 133/180] deconv layer implementation modification following luotao1 comments --- paddle/gserver/layers/ConvBaseLayer.cpp | 65 +++++++------- paddle/gserver/layers/ConvBaseLayer.h | 2 - paddle/gserver/layers/ExpandConvBaseLayer.cpp | 16 ++-- paddle/gserver/tests/test_ConvTrans.cpp | 63 +++++++------- paddle/gserver/tests/test_LayerGrad.cpp | 6 +- python/paddle/trainer/config_parser.py | 87 +++++++++---------- .../paddle/trainer_config_helpers/layers.py | 2 +- 7 files changed, 115 insertions(+), 126 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 5bc22f477932c..733065a753772 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -89,42 +89,41 @@ size_t ConvBaseLayer::calOutputSize() { clearAndReserve(&outputW_); size_t layerSize = 0; - if (!isDeconv_) { + auto setLayerSize = [&](IntV& inH, IntV& inW, IntV& outH, IntV& outW) { for (size_t i = 0; i < inputLayers_.size(); i++) { - imgSizeH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); - imgSizeW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); - if (imgSizeH_[i] == 0) - imgSizeH_[i] = config_.inputs(i).conv_conf().img_size(); - if (imgSizeW_[i] == 0) - imgSizeW_[i] = config_.inputs(i).conv_conf().img_size(); - outputH_.push_back( - outputSize(imgSizeH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); - outputW_.push_back( - outputSize(imgSizeW_[i], filterSize_[i], padding_[i], stride_[i])); - CHECK_EQ(outputH_[i], outputH_[0]); - CHECK_EQ(outputW_[i], outputW_[0]); + inH.push_back(inputLayers_[i]->getOutput().getFrameHeight()); + inW.push_back(inputLayers_[i]->getOutput().getFrameWidth()); + if (isDeconv_) { + if (inH[i] == 0) + inH[i] = config_.inputs(i).conv_conf().output_x(); + if (inW[i] == 0) + inW[i] = config_.inputs(i).conv_conf().output_x(); + outH.push_back( + imageSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + outW.push_back( + imageSize(inW[i], filterSize_[i], padding_[i], stride_[i])); + } else { + if (inH[i] == 0) + inH[i] = config_.inputs(i).conv_conf().img_size(); + if (inW[i] == 0) + inW[i] = config_.inputs(i).conv_conf().img_size(); + outH.push_back( + outputSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + outW.push_back( + outputSize(inW[i], filterSize_[i], padding_[i], stride_[i])); + CHECK_EQ(outH[i], outH[0]); + CHECK_EQ(outW[i], outW[0]); + } } - getOutput().setFrameHeight(outputH_[0]); - getOutput().setFrameWidth(outputW_[0]); - layerSize = outputH_[0] * outputW_[0] * size_t(numFilters_); + getOutput().setFrameHeight(outH[0]); + getOutput().setFrameWidth(outW[0]); + layerSize = outH[0] * outW[0] * size_t(numFilters_); + }; + + if (isDeconv_) { + setLayerSize(outputH_, outputW_, imgSizeH_, imgSizeW_); } else { - for (size_t i = 0; i < inputLayers_.size(); i++) { - 
outputH_.push_back(inputLayers_[i]->getOutput().getFrameHeight()); - outputW_.push_back(inputLayers_[i]->getOutput().getFrameWidth()); - if (outputH_[i] == 0) - outputH_[i] = config_.inputs(i).conv_conf().output_x(); - if (outputW_[i] == 0) - outputW_[i] = config_.inputs(i).conv_conf().output_x(); - imgSizeH_.push_back( - imageSize(outputH_[i], filterSizeY_[i], paddingY_[i], strideY_[i])); - imgSizeW_.push_back( - imageSize(outputW_[i], filterSize_[i], padding_[i], stride_[i])); - CHECK_EQ(imgSizeH_[i], imgSizeH_[0]); - CHECK_EQ(imgSizeW_[i], imgSizeW_[0]); - } - getOutput().setFrameHeight(imgSizeH_[0]); - getOutput().setFrameWidth(imgSizeW_[0]); - layerSize = imgSizeH_[0] * imgSizeW_[0] * size_t(numFilters_); + setLayerSize(imgSizeH_, imgSizeW_, outputH_, outputW_); } return layerSize; diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index 2f2ce59ad9e6c..4d5b2b8d05af7 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -78,8 +78,6 @@ class ConvBaseLayer : public Layer { /// of output size. bool caffeMode_; - - public: explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {} diff --git a/paddle/gserver/layers/ExpandConvBaseLayer.cpp b/paddle/gserver/layers/ExpandConvBaseLayer.cpp index 75ac8245d8829..0bab0ca764f4f 100644 --- a/paddle/gserver/layers/ExpandConvBaseLayer.cpp +++ b/paddle/gserver/layers/ExpandConvBaseLayer.cpp @@ -31,14 +31,14 @@ bool ExpandConvBaseLayer::init(const LayerMap &layerMap, * convTrans, and in other functions too. * */ int channel; - int nf; + int numFilters; /* Initialize the projection */ for (auto &inputConfig : config_.inputs()) { const ConvConfig &conf = inputConfig.conv_conf(); - nf = (!isDeconv_) ? numFilters_ : conf.channels(); - subM_.push_back(nf / conf.groups()); + numFilters = isDeconv_ ? conf.channels() : numFilters_; + subM_.push_back(numFilters / conf.groups()); subN_.push_back(conf.output_x() * conf.output_x()); - channel = (!isDeconv_) ? conf.channels() : numFilters_; + channel = isDeconv_ ? numFilters_ : conf.channels(); subK_.push_back(channel * conf.filter_size() * conf.filter_size() / conf.groups()); /* Consistent caffe mode for multiple input */ @@ -99,7 +99,7 @@ void ExpandConvBaseLayer::addUnsharedBias() { void ExpandConvBaseLayer::expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx) { - int channel = (!isDeconv_) ? channels_[inIdx] : numFilters_; + int channel = isDeconv_ ? numFilters_ : channels_[inIdx]; resetExpandInput(subK_[inIdx] * groups_[inIdx], subN_[inIdx]); real *imgData = image->getData() + startIdx * image->getWidth(); @@ -122,10 +122,10 @@ void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, MatrixPtr out, expandOneFrame(image, startIdx, inIdx); - int nf = (!isDeconv_) ? numFilters_ : channels_[inIdx]; + int numFilters = isDeconv_ ? channels_[inIdx] : numFilters_; real *outData = - out->getData() + startIdx * subN * nf; + out->getData() + startIdx * subN * numFilters; real *wgtData = weights_[inIdx]->getW()->getData(); real *expInData = expandInput_->getData(); @@ -147,7 +147,7 @@ void ExpandConvBaseLayer::expandFwdOnce(MatrixPtr image, MatrixPtr out, void ExpandConvBaseLayer::bpropActs(MatrixPtr out, MatrixPtr image, int inpIdx) { - int channel = (!isDeconv_) ? channels_[inpIdx] : numFilters_; + int channel = isDeconv_ ? 
numFilters_ : channels_[inpIdx]; int subM = subM_[inpIdx]; int subN = subN_[inpIdx]; diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 756faf26516fe..9246484ba22c2 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -189,58 +189,55 @@ void doOneConvtTest(size_t imgSize, size_t output_x, size_t stride, } TEST(Layer, convTransLayerFwd2) { - size_t imgSize, output_x, stride, padding, filter_size; MatrixPtr result; - - imgSize = 5; - output_x = 1; - stride = 1; - padding = 0; - filter_size = 5; - result = Matrix::create(1, imgSize * imgSize, false, false); + result = Matrix::create(1, 5 * 5, false, false); result->zeroMem(); result->add(1.0); - doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); + doOneConvtTest(/* imgSize */ 5, + /* output_x */ 1, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 5, + result); - imgSize = 5; - output_x = 2; - stride = 1; - padding = 0; - filter_size = 4; float resultData[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2, 1, 2, 2, 2, 1}; - result = Matrix::create(resultData, 1, imgSize * imgSize, false, false); - doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); - - imgSize = 5; - output_x = 2; - stride = 2; - padding = 1; - filter_size = 5; + result->setData(resultData); + doOneConvtTest(/* imgSize */ 5, + /* output_x */ 2, + /* stride */ 1, + /* padding */ 0, + /* filter_size */ 4, + result); + float resultData2[] = {1, 2, 2, 2, 1, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2, 2, 4, 4, 4, 2, 1, 2, 2, 2, 1}; - result = Matrix::create(resultData2, 1, imgSize * imgSize, false, false); - doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); - - imgSize = 5; - output_x = 2; - stride = 2; - padding = 0; - filter_size = 3; + result->setData(resultData2); + doOneConvtTest(/* imgSize */ 5, + /* output_x */ 2, + /* stride */ 2, + /* padding */ 1, + /* filter_size */ 5, + result); + float resultData3[] = {1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 2, 2, 4, 2, 2, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1}; - result = Matrix::create(resultData3, 1, imgSize * imgSize, false, false); - doOneConvtTest(imgSize, output_x, stride, padding, filter_size, result); -} + result->setData(resultData3); + doOneConvtTest(/* imgSize */ 5, + /* output_x */ 2, + /* stride */ 2, + /* padding */ 0, + /* filter_size */ 3, + result);} int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 42c7b13906254..9e2e5ebaac24e 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -351,12 +351,10 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) { TEST(Layer, convTransLayer) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ false); -/* #ifndef PADDLE_ONLY_CPU - testConvLayer("exconv", trans= false, useGpu= true); - testConvLayer("cudnn_conv", trans= false, useGpu= true); + testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ true); + // testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true); #endif -*/ } TEST(Layer, blockExpandLayer) { diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index b3d17a47a965f..3aa5576c3cf06 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1082,7 +1082,11 @@ def parse_norm(norm, input_layer_name, norm_conf): else: 
norm_conf.scale /= norm.size ** 2 -def parse_conv(conv, input_layer_name, conv_conf): +''' +caffe_mode: compute the output size using floor instead of ceil, + which is consistent of caffe and CuDNN's convention. +''' +def parse_conv(conv, input_layer_name, conv_conf, trans=False): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y conv_conf.channels = conv.channels @@ -1093,49 +1097,41 @@ def parse_conv(conv, input_layer_name, conv_conf): conv_conf.groups = conv.groups conv_conf.filter_channels = conv.channels / conv.groups conv_conf.caffe_mode = conv.caffe_mode - - img_pixels = g_layer_map[input_layer_name].size / conv.channels - print('channels=%d size=%d'%(conv.channels, - g_layer_map[input_layer_name].size)) - conv_conf.img_size = int(img_pixels ** 0.5) - config_assert((conv_conf.img_size ** 2) == img_pixels, - ("Input layer %s: Incorrect input image size %d for input " - + "image pixels %d") - % (input_layer_name, conv_conf.img_size, img_pixels)) - conv_conf.output_x = cnn_output_size(conv_conf.img_size, conv_conf.filter_size, - conv_conf.padding, conv_conf.stride, - conv_conf.caffe_mode) - - -def parse_conv_trans(conv, input_layer_name, conv_conf, num_filters): - conv_conf.filter_size = conv.filter_size - conv_conf.filter_size_y = conv.filter_size_y - conv_conf.channels = conv.channels - conv_conf.padding = conv.padding - conv_conf.padding_y = conv.padding_y - conv_conf.stride = conv.stride - conv_conf.stride_y = conv.stride_y - conv_conf.groups = conv.groups - conv_conf.filter_channels = num_filters / conv.groups - conv_conf.caffe_mode = conv.caffe_mode - - outputSize = g_layer_map[input_layer_name].size / conv.channels - print('channels=%d size=%d'%(conv.channels, - g_layer_map[input_layer_name].size)) - conv_conf.output_x = int(outputSize ** 0.5) - config_assert((conv_conf.output_x ** 2) == outputSize, - ("Input layer %s: Incorrect input image size %d for input " - + "image pixels %d") - % (input_layer_name, conv_conf.output_x, outputSize)) - if conv.caffe_mode: - conv_conf.img_size = \ - (conv_conf.output_x - 1) * conv.stride \ - + conv.filter_size - 2 * conv.padding + + if not trans: + img_pixels = g_layer_map[input_layer_name].size / conv.channels + print('channels=%d size=%d'%(conv.channels, + g_layer_map[input_layer_name].size)) + conv_conf.img_size = int(img_pixels ** 0.5) + config_assert((conv_conf.img_size ** 2) == img_pixels, + ("Input layer %s: Incorrect input image size %d for input " + + "image pixels %d") + % (input_layer_name, conv_conf.img_size, img_pixels)) + if conv.caffe_mode: + conv_conf.output_x = \ + 1 + int(math.floor((2 * conv.padding + conv_conf.img_size \ + - conv.filter_size) / float(conv.stride))) + else: + conv_conf.output_x = \ + 1 + int(math.ceil((2 * conv.padding + conv_conf.img_size \ + - conv.filter_size) / float(conv.stride))) else: - conv_conf.img_size = \ - (conv_conf.output_x - 2) * conv.stride \ - + conv.filter_size - 2 * conv.padding + 1 - + outputSize = g_layer_map[input_layer_name].size / conv.channels + print('channels=%d size=%d'%(conv.channels, + g_layer_map[input_layer_name].size)) + conv_conf.output_x = int(outputSize ** 0.5) + config_assert((conv_conf.output_x ** 2) == outputSize, + ("Input layer %s: Incorrect input image size %d for input " + + "image pixels %d") + % (input_layer_name, conv_conf.output_x, outputSize)) + if conv.caffe_mode: + conv_conf.img_size = \ + (conv_conf.output_x - 1) * conv.stride \ + + conv.filter_size - 2 * conv.padding + else: + conv_conf.img_size = \ + (conv_conf.output_x 
- 2) * conv.stride \ + + conv.filter_size - 2 * conv.padding + 1 def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.channels = block_expand.channels @@ -1685,10 +1681,11 @@ def __init__( for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_conv_trans( + parse_conv( self.inputs[input_index].conv, input_layer.name, - self.config.inputs[input_index].conv_conf, num_filters) + self.config.inputs[input_index].conv_conf, num_filters, + trans=True) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) print("output size for %s is %d " % (name, conv_conf.output_x)) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 711c9ca993a23..1fdb1a849e86b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -36,7 +36,7 @@ "pooling_layer", "lstmemory", "last_seq", "first_seq", "cos_sim", "hsigmoid", "conv_projection", "regression_cost", 'classification_cost', "LayerOutput", - 'img_conv_layer', 'img_convTrans_layer', 'img_pool_layer', 'batch_norm_layer', + 'img_conv_layer', 'img_pool_layer', 'batch_norm_layer', 'img_cmrnorm_layer', 'addto_layer', 'concat_layer', 'lstm_step_layer', 'recurrent_group', 'memory', 'StaticInput', 'expand_layer', 'scaling_layer', From d116b17f060cfad17c381a9f978d81dadf9fa81e Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Thu, 3 Nov 2016 17:30:02 -0700 Subject: [PATCH 134/180] fix a small bug in ConvTransLayerBase in config_parser.py --- python/paddle/trainer/config_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 3aa5576c3cf06..5ee46cd5f788b 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1684,7 +1684,7 @@ def __init__( parse_conv( self.inputs[input_index].conv, input_layer.name, - self.config.inputs[input_index].conv_conf, num_filters, + self.config.inputs[input_index].conv_conf, trans=True) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) From 7a322df0a8e9bf06be32eb2433445d2bc4f22997 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Fri, 4 Nov 2016 10:15:57 -0700 Subject: [PATCH 135/180] deconv implementation mionr changes in ConvBaseLayer.cpp and config_parser.py --- paddle/gserver/layers/ConvBaseLayer.cpp | 12 ++++-------- python/paddle/trainer/config_parser.py | 5 ----- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index 733065a753772..b9359867b9cce 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -20,12 +20,8 @@ bool ConvBaseLayer::init(const LayerMap& layerMap, const ParameterMap& parameterMap) { /* Initialize the basic parent class */ Layer::init(layerMap, parameterMap); - - if (config_.type() == "exconv" || config_.type() == "cudnn_conv") { - isDeconv_ = false; - } else { - isDeconv_ = true; - } + isDeconv_ = (config_.type() == "exconv" || config_.type() == "cudnn_conv") + ? 
false : true; /* Initialize the convolutional layer parameter */ numFilters_ = config_.num_filters(); @@ -111,9 +107,9 @@ size_t ConvBaseLayer::calOutputSize() { outputSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i])); outW.push_back( outputSize(inW[i], filterSize_[i], padding_[i], stride_[i])); - CHECK_EQ(outH[i], outH[0]); - CHECK_EQ(outW[i], outW[0]); } + CHECK_EQ(outH[i], outH[0]); + CHECK_EQ(outW[i], outW[0]); } getOutput().setFrameHeight(outH[0]); getOutput().setFrameWidth(outW[0]); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 5ee46cd5f788b..b75c2618411d3 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1666,11 +1666,6 @@ def __init__( if self.layer_type == "cudnn_convt": config_assert(use_gpu, "cudnn_convt only support GPU") -# if (use_gpu == 1 and self.layer_type != "exconvt" and -# (parallel_nn == 0 or self.config.device > -1)): -# self.layer_type = "cudnn_convt" -# else: -# self.layer_type = "exconvt" # cudnn_convt has not been implemented so use exconvt only self.layer_type = "exconvt" # need to specify layer in config From 03f4b1d4d2073248acedef00eb0d313f03ebf789 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Mon, 7 Nov 2016 09:49:33 -0800 Subject: [PATCH 136/180] minor changes on deconv per luotao1 comments --- paddle/gserver/tests/test_LayerGrad.cpp | 1 - python/paddle/trainer_config_helpers/layers.py | 5 +---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 9e2e5ebaac24e..0fed97a73b0a4 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -353,7 +353,6 @@ TEST(Layer, convTransLayer) { testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ false); #ifndef PADDLE_ONLY_CPU testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ true); - // testConvLayer("cudnn_conv", /* trans= */ false, /* useGpu= */ true); #endif } diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 1fdb1a849e86b..ecc73c34a833c 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1619,10 +1619,7 @@ def img_conv_layer(input, filter_size, num_filters, param_attr.attr["initial_strategy"] = 0 param_attr.attr["initial_smart"] = False - if trans: - lt = LayerType.CONVTRANS_LAYER - else: - lt = LayerType.CONV_LAYER + lt = LayerType.CONVTRANS_LAYER if trans else LayerType.CONV_LAYER Layer( name=name, From 53e1629a43af7dc0e3b7d5d02745632e399aa6a8 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 8 Nov 2016 14:59:12 -0800 Subject: [PATCH 137/180] Refactored imageSize in ConvBaseLayer to MathUtil --- paddle/gserver/layers/ConvBaseLayer.cpp | 13 ++++++--- paddle/gserver/layers/ConvBaseLayer.h | 37 ------------------------- paddle/gserver/tests/test_ConvTrans.cpp | 16 +++++------ paddle/gserver/tests/test_LayerGrad.cpp | 7 ++--- paddle/math/MathUtils.cpp | 13 +++++++++ paddle/math/MathUtils.h | 3 ++ python/paddle/trainer/config_parser.py | 12 +++----- 7 files changed, 39 insertions(+), 62 deletions(-) diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/gserver/layers/ConvBaseLayer.cpp index b9359867b9cce..6bc3b3b801796 100644 --- a/paddle/gserver/layers/ConvBaseLayer.cpp +++ b/paddle/gserver/layers/ConvBaseLayer.cpp @@ -14,6 +14,7 @@ limitations under the License. 
*/ #include "paddle/utils/Logging.h" #include "ConvBaseLayer.h" +#include "paddle/math/MathUtils.h" namespace paddle { bool ConvBaseLayer::init(const LayerMap& layerMap, @@ -95,18 +96,22 @@ size_t ConvBaseLayer::calOutputSize() { if (inW[i] == 0) inW[i] = config_.inputs(i).conv_conf().output_x(); outH.push_back( - imageSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + imageSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i], + caffeMode_)); outW.push_back( - imageSize(inW[i], filterSize_[i], padding_[i], stride_[i])); + imageSize(inW[i], filterSize_[i], padding_[i], stride_[i], + caffeMode_)); } else { if (inH[i] == 0) inH[i] = config_.inputs(i).conv_conf().img_size(); if (inW[i] == 0) inW[i] = config_.inputs(i).conv_conf().img_size(); outH.push_back( - outputSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i])); + outputSize(inH[i], filterSizeY_[i], paddingY_[i], strideY_[i], + caffeMode_)); outW.push_back( - outputSize(inW[i], filterSize_[i], padding_[i], stride_[i])); + outputSize(inW[i], filterSize_[i], padding_[i], stride_[i], + caffeMode_)); } CHECK_EQ(outH[i], outH[0]); CHECK_EQ(outW[i], outW[0]); diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/gserver/layers/ConvBaseLayer.h index 4d5b2b8d05af7..b80cab899585e 100644 --- a/paddle/gserver/layers/ConvBaseLayer.h +++ b/paddle/gserver/layers/ConvBaseLayer.h @@ -91,43 +91,6 @@ class ConvBaseLayer : public Layer { virtual size_t calOutputSize(); Weight& getWeight(int idx) { return *weights_[idx]; } - - /** - * Calculate output size based on caffeMode_. - * - input(+padding): 0123456789 - * - imageSize(+padding) = 10; - * - filterSize = 3; - * - stride = 2; - * - caffeMode_ is true: - - output: (012), (234), (456), (678) - - outputSize = 4; - * - caffeMode_ is false: - * - output: (012), (234), (456), (678), (9) - * - outputSize = 5; - */ - int outputSize(int imageSize, int filterSize, int padding, int stride) { - int outputSize; - if (!caffeMode_) { - outputSize = - (imageSize - filterSize + 2 * padding + stride - 1) / stride + 1; - } else { - outputSize = (imageSize - filterSize + 2 * padding) / stride + 1; - } - CHECK_GE(outputSize, 1); - return outputSize; - } - - int imageSize(int outputSize, int filterSize, int padding, int stride) { - int imageSize; - if (!caffeMode_) { - imageSize = - (outputSize - 1) * stride + filterSize - 2 * padding - stride + 1; - } else { - imageSize = (outputSize - 1) * stride + filterSize - 2 * padding; - } - CHECK_GE(imageSize, 1); - return imageSize; - } }; } // namespace paddle diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/gserver/tests/test_ConvTrans.cpp index 9246484ba22c2..bff7222b29907 100644 --- a/paddle/gserver/tests/test_ConvTrans.cpp +++ b/paddle/gserver/tests/test_ConvTrans.cpp @@ -20,6 +20,7 @@ limitations under the License. 
*/ #include "paddle/trainer/Trainer.h" #include "paddle/utils/GlobalConstants.h" #include "paddle/gserver/layers/ExpandConvTransLayer.h" +#include "paddle/math/MathUtils.h" #include "TestUtil.h" #include "LayerGradUtil.h" @@ -56,11 +57,9 @@ TEST(Layer, convTransLayerFwd) { conv->set_groups(1); conv->set_filter_channels(3 / conv->groups()); conv->set_img_size(16); - conv->set_output_x( - (2 * conv->padding() + conv->img_size() - conv->filter_size()) / - ((float)conv->stride()) + - 1.5); - + conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(), + conv->padding(), conv->stride(), + /* caffeMode */ true)); configt.layerConfig.set_size(conv->img_size() * conv->img_size() * configt.layerConfig.num_filters()); configt.layerConfig.set_name("convTrans"); @@ -99,10 +98,9 @@ TEST(Layer, convTransLayerFwd) { conv->set_groups(1); conv->set_filter_channels(conv->channels() / conv->groups()); conv->set_img_size(16); - conv->set_output_x( - (2 * conv->padding() + conv->img_size() - conv->filter_size()) / - ((float)conv->stride()) + - 1.5); + conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(), + conv->padding(), conv->stride(), + /* caffeMode */ true)); config.layerConfig.set_size(conv->output_x() * conv->output_x() * config.layerConfig.num_filters()); config.layerConfig.set_name("conv"); diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 0fed97a73b0a4..02a30719f98e0 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -336,10 +336,9 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) { conv->set_groups(1); conv->set_filter_channels(3 / conv->groups()); conv->set_img_size(16); - conv->set_output_x( - (2 * conv->padding() + conv->img_size() - conv->filter_size()) / - ((float)conv->stride()) + - 1.5); + conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(), + conv->padding(), conv->stride(), + /* caffeMode */ true)); config.layerConfig.set_size(conv->img_size() * conv->img_size() * config.layerConfig.num_filters()); diff --git a/paddle/math/MathUtils.cpp b/paddle/math/MathUtils.cpp index c1af8628d03c5..548f17936381c 100644 --- a/paddle/math/MathUtils.cpp +++ b/paddle/math/MathUtils.cpp @@ -80,4 +80,17 @@ int outputSize(int imageSize, int filterSize, int padding, int stride, return outputSize; } +int imageSize(int outputSize, int filterSize, int padding, int stride, + bool caffeMode) { + int imageSize; + if (!caffeMode) { + imageSize = + (outputSize - 1) * stride + filterSize - 2 * padding - stride + 1; + } else { + imageSize = (outputSize - 1) * stride + filterSize - 2 * padding; + } + CHECK_GE(imageSize, 1); + return imageSize; +} + } // namespace paddle diff --git a/paddle/math/MathUtils.h b/paddle/math/MathUtils.h index 49d0c10a8f5e4..ae035e55bcceb 100644 --- a/paddle/math/MathUtils.h +++ b/paddle/math/MathUtils.h @@ -60,4 +60,7 @@ void sparseRand(int* major, int* minor, int nnz, int majorLen, int minorMax, int outputSize(int imageSize, int filterSize, int padding, int stride, bool caffeMode); +int imageSize(int outputSize, int filterSize, int padding, int stride, + bool caffeMode); + } // namespace paddle diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index b75c2618411d3..8b13e4a142544 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1107,14 +1107,10 @@ def parse_conv(conv, input_layer_name, conv_conf, trans=False): ("Input layer %s: Incorrect 
input image size %d for input " + "image pixels %d") % (input_layer_name, conv_conf.img_size, img_pixels)) - if conv.caffe_mode: - conv_conf.output_x = \ - 1 + int(math.floor((2 * conv.padding + conv_conf.img_size \ - - conv.filter_size) / float(conv.stride))) - else: - conv_conf.output_x = \ - 1 + int(math.ceil((2 * conv.padding + conv_conf.img_size \ - - conv.filter_size) / float(conv.stride))) + + conv_conf.output_x = cnn_output_size( + conv_conf.img_size, conv_conf.filter_size, + conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) else: outputSize = g_layer_map[input_layer_name].size / conv.channels print('channels=%d size=%d'%(conv.channels, From 449120991ef9d4deb5e9635be3017824a0370a29 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Tue, 8 Nov 2016 15:02:49 -0800 Subject: [PATCH 138/180] minor change to convTransLayer test in test_LayerGrad --- paddle/gserver/tests/test_LayerGrad.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index 02a30719f98e0..7b6e6fd3999ff 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -349,10 +349,9 @@ void testConvTransLayer(const string& type, bool trans, bool useGpu) { } TEST(Layer, convTransLayer) { - testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ false); -#ifndef PADDLE_ONLY_CPU - testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ true); -#endif + for (auto useGpu : {false, true}) { + testConvTransLayer("exconvt", /* trans= */ false, /* useGpu= */ useGpu); + } } TEST(Layer, blockExpandLayer) { From af7a50c0d4e42d53920681c661921d4e78ff8e36 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Wed, 9 Nov 2016 11:26:28 -0800 Subject: [PATCH 139/180] minor changes on deconv implementation and add protostr test for deconv layer --- paddle/gserver/layers/ExpandConvLayer.cpp | 5 +--- .../gserver/layers/ExpandConvTransLayer.cpp | 2 -- paddle/gserver/layers/ExpandConvTransLayer.h | 2 +- paddle/math/MathUtils.h | 4 +++ python/paddle/trainer/config_parser.py | 28 +++++++++---------- .../tests/configs/generate_protostr.sh | 2 +- .../tests/configs/img_trans_layers.py | 22 +++++++++++++++ 7 files changed, 43 insertions(+), 22 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/gserver/layers/ExpandConvLayer.cpp index 9f30fcf00a422..5ea1fdece5f7b 100644 --- a/paddle/gserver/layers/ExpandConvLayer.cpp +++ b/paddle/gserver/layers/ExpandConvLayer.cpp @@ -32,10 +32,7 @@ void ExpandConvLayer::forward(PassType passType) { Layer::forward(passType); /* malloc memory for the output_ if necessary */ - /* note: one sample correspond to one colum, and the - * transOutValue correspond sample to one row */ - int batchSize = inputLayers_[0]->getOutputValue()->getWidth(); - batchSize = inputLayers_[0]->getOutputValue()->getHeight(); + int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); resetOutput(batchSize, getOutputSize()); MatrixPtr image = nullptr; diff --git a/paddle/gserver/layers/ExpandConvTransLayer.cpp b/paddle/gserver/layers/ExpandConvTransLayer.cpp index 4c4016c30168f..a3e160f1f4eb5 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.cpp +++ b/paddle/gserver/layers/ExpandConvTransLayer.cpp @@ -38,8 +38,6 @@ void ExpandConvTransLayer::forward(PassType passType) { Layer::forward(passType); /* malloc memory for the output_ if necessary */ - /* note: one 
sample correspond to one colum, and the - * transOutValue correspond sample to one row */ int batchSize = inputLayers_[0]->getOutputValue()->getHeight(); resetOutput(batchSize, getOutputSize()); diff --git a/paddle/gserver/layers/ExpandConvTransLayer.h b/paddle/gserver/layers/ExpandConvTransLayer.h index d0c0469c351aa..87c464a97f2ed 100644 --- a/paddle/gserver/layers/ExpandConvTransLayer.h +++ b/paddle/gserver/layers/ExpandConvTransLayer.h @@ -26,7 +26,7 @@ namespace paddle { * This layer expands input and use matrix multiplication to * calculate convolution transpose (deconv) operation. * - * The config file api is img_convTrans_layer. + * The config file api is img_conv_layer with flag trans=True. */ class ExpandConvTransLayer : public ExpandConvBaseLayer { public: diff --git a/paddle/math/MathUtils.h b/paddle/math/MathUtils.h index ae035e55bcceb..91683dc3e9144 100644 --- a/paddle/math/MathUtils.h +++ b/paddle/math/MathUtils.h @@ -60,6 +60,10 @@ void sparseRand(int* major, int* minor, int nnz, int majorLen, int minorMax, int outputSize(int imageSize, int filterSize, int padding, int stride, bool caffeMode); +/** + * Calculate image size based on output size and caffeMode_. + * It is the reverse function of outputSize() + */ int imageSize(int outputSize, int filterSize, int padding, int stride, bool caffeMode); diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 8b13e4a142544..4a701326e4f9c 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -1017,6 +1017,17 @@ def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode): else: return 1 + int(math.ceil(output)) +''' +calcualte image_size based on output_size for convolution. +It is the reverse function of cnn_output_size +''' +def cnn_image_size(output_size, filter_size, padding, stride, caffe_mode): + if caffe_mode: + img_size = (output_size - 1) * stride + filter_size - 2 * padding + else: + img_size = (output_size - 2) * stride + filter_size - 2 * padding + 1 + return img_size + def parse_pool(pool, input_layer_name, pool_conf): pool_conf.pool_type = pool.pool_type config_assert(pool.pool_type in ['max-projection', 'avg-projection', @@ -1120,14 +1131,9 @@ def parse_conv(conv, input_layer_name, conv_conf, trans=False): ("Input layer %s: Incorrect input image size %d for input " + "image pixels %d") % (input_layer_name, conv_conf.output_x, outputSize)) - if conv.caffe_mode: - conv_conf.img_size = \ - (conv_conf.output_x - 1) * conv.stride \ - + conv.filter_size - 2 * conv.padding - else: - conv_conf.img_size = \ - (conv_conf.output_x - 2) * conv.stride \ - + conv.filter_size - 2 * conv.padding + 1 + conv_conf.img_size = cnn_image_size( + conv_conf.output_x, conv_conf.filter_size, + conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.channels = block_expand.channels @@ -1656,12 +1662,6 @@ def __init__( use_gpu = int(g_command_config_args.get("use_gpu", 0)) parallel_nn = int(g_command_config_args.get("parallel_nn", 0)) - # Automatically select cudnn_type for GPU and exconv for CPU - # if set type=conv, but still reserve the way user specify - # exconv or cudnn_conv manually. 
- if self.layer_type == "cudnn_convt": - config_assert(use_gpu, "cudnn_convt only support GPU") - # cudnn_convt has not been implemented so use exconvt only self.layer_type = "exconvt" # need to specify layer in config diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 77774f6fcfafd..b8687e1d48371 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -9,7 +9,7 @@ protostr=$PWD/protostr configs=(test_fc layer_activations projections test_print_layer test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid -img_layers util_layers simple_rnn_layers unused_layers test_cost_layers +img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight test_maxout test_bi_grumemory math_ops) diff --git a/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py new file mode 100644 index 0000000000000..077c78d201648 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/img_trans_layers.py @@ -0,0 +1,22 @@ +from paddle.trainer_config_helpers import * + +settings( + learning_rate=1e-3, + batch_size=1000 +) + +img = data_layer(name='image', size=227*227) + +# the parse_conv in config_parse.py is not strictly accurate when filter_size +# is not square. So here set square filter_size. +img_conv = img_conv_layer(input=img, num_channels=1, num_filters=64, + filter_size=(32, 32), padding=(1, 1), stride=(1, 1), + act=LinearActivation(), trans=True) +img_bn = batch_norm_layer(input=img_conv, act=ReluActivation()) + +img_norm = img_cmrnorm_layer(input=img_bn, size=32) + +img_pool = img_pool_layer(input=img_conv, pool_size=32, pool_type=MaxPooling()) + + +outputs(img_pool, img_norm) From 1c58e27fdc94ebdec8522e36c47b3d636a835896 Mon Sep 17 00:00:00 2001 From: wangyang59 Date: Wed, 9 Nov 2016 11:43:40 -0800 Subject: [PATCH 140/180] fixed a bug in parse_conv in config_parser.py --- python/paddle/trainer/config_parser.py | 17 +- .../protostr/img_trans_layers.protostr | 176 ++++++++++++++++++ 2 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 4a701326e4f9c..958bfdaf2e283 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -649,7 +649,8 @@ def __init__( parse_conv(conv_conf, input_layer_name, - self.proj_conf.conv_conf) + self.proj_conf.conv_conf, + num_filters) # TODO: support rectangle input self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x ** 2) * num_filters @@ -730,7 +731,8 @@ def __init__( parse_conv(conv_conf, MakeLayerNameInSubmodel(input_layer_names[0]), - self.operator_conf.conv_conf) + self.operator_conf.conv_conf, + num_filters) self.operator_conf.output_size = (self.operator_conf.conv_conf.output_x ** 2) * num_filters config_assert(len(input_layer_names) == 2, "Conv is binary operator") @@ -1097,7 +1099,7 @@ def parse_norm(norm, input_layer_name, norm_conf): caffe_mode: compute the output size using floor instead of ceil, which is consistent of caffe and CuDNN's 
convention. ''' -def parse_conv(conv, input_layer_name, conv_conf, trans=False): +def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y conv_conf.channels = conv.channels @@ -1106,10 +1108,11 @@ def parse_conv(conv, input_layer_name, conv_conf, trans=False): conv_conf.stride = conv.stride conv_conf.stride_y = conv.stride_y conv_conf.groups = conv.groups - conv_conf.filter_channels = conv.channels / conv.groups conv_conf.caffe_mode = conv.caffe_mode if not trans: + conv_conf.filter_channels = conv.channels / conv.groups + img_pixels = g_layer_map[input_layer_name].size / conv.channels print('channels=%d size=%d'%(conv.channels, g_layer_map[input_layer_name].size)) @@ -1123,6 +1126,8 @@ def parse_conv(conv, input_layer_name, conv_conf, trans=False): conv_conf.img_size, conv_conf.filter_size, conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) else: + conv_conf.filter_channels = num_filters / conv.groups + outputSize = g_layer_map[input_layer_name].size / conv.channels print('channels=%d size=%d'%(conv.channels, g_layer_map[input_layer_name].size)) @@ -1616,7 +1621,8 @@ def __init__( parse_conv( self.inputs[input_index].conv, input_layer.name, - self.config.inputs[input_index].conv_conf) + self.config.inputs[input_index].conv_conf, + num_filters) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) print("output size for %s is %d " % (name, conv_conf.output_x)) @@ -1676,6 +1682,7 @@ def __init__( self.inputs[input_index].conv, input_layer.name, self.config.inputs[input_index].conv_conf, + num_filters, trans=True) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr new file mode 100644 index 0000000000000..38346354080b0 --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/img_trans_layers.protostr @@ -0,0 +1,176 @@ +type: "nn" +layers { + name: "image" + type: "data" + size: 51529 + active_type: "" +} +layers { + name: "__conv_0__" + type: "exconvt" + size: 4194304 + active_type: "" + inputs { + input_layer_name: "image" + input_parameter_name: "___conv_0__.w0" + conv_conf { + filter_size: 32 + channels: 1 + stride: 1 + padding: 1 + groups: 1 + filter_channels: 64 + output_x: 227 + img_size: 256 + caffe_mode: true + filter_size_y: 32 + padding_y: 1 + stride_y: 1 + } + } + bias_parameter_name: "___conv_0__.wbias" + num_filters: 64 + shared_biases: true +} +layers { + name: "__batch_norm_0__" + type: "batch_norm" + size: 4194304 + active_type: "relu" + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w0" + image_conf { + channels: 64 + img_size: 256 + } + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w1" + } + inputs { + input_layer_name: "__conv_0__" + input_parameter_name: "___batch_norm_0__.w2" + } + bias_parameter_name: "___batch_norm_0__.wbias" + moving_average_fraction: 0.9 +} +layers { + name: "__crmnorm_0__" + type: "norm" + size: 4194304 + active_type: "" + inputs { + input_layer_name: "__batch_norm_0__" + norm_conf { + norm_type: "cmrnorm-projection" + channels: 64 + size: 32 + scale: 0.0004 + pow: 0.75 + output_x: 256 + img_size: 256 + blocked: false + } + } +} +layers { + name: "__pool_0__" + 
type: "pool" + size: 3240000 + active_type: "" + inputs { + input_layer_name: "__conv_0__" + pool_conf { + pool_type: "max-projection" + channels: 64 + size_x: 32 + stride: 1 + output_x: 225 + img_size: 256 + padding: 0 + size_y: 32 + stride_y: 1 + output_y: 225 + img_size_y: 256 + padding_y: 0 + } + } +} +parameters { + name: "___conv_0__.w0" + size: 65536 + initial_mean: 0.0 + initial_std: 0.0441941738242 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___conv_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 64 + dims: 1 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w0" + size: 64 + initial_mean: 1.0 + initial_std: 0.0 + initial_strategy: 0 + initial_smart: false +} +parameters { + name: "___batch_norm_0__.w1" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.w2" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false + is_static: true + is_shared: true +} +parameters { + name: "___batch_norm_0__.wbias" + size: 64 + initial_mean: 0.0 + initial_std: 0.0 + dims: 1 + dims: 64 + initial_strategy: 0 + initial_smart: false +} +input_layer_names: "image" +output_layer_names: "__pool_0__" +output_layer_names: "__crmnorm_0__" +sub_models { + name: "root" + layer_names: "image" + layer_names: "__conv_0__" + layer_names: "__batch_norm_0__" + layer_names: "__crmnorm_0__" + layer_names: "__pool_0__" + input_layer_names: "image" + output_layer_names: "__pool_0__" + output_layer_names: "__crmnorm_0__" + is_recurrent_layer_group: false +} + From 9ff0db37feb08b0afe9e78c79379b6418c904f64 Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 10 Nov 2016 09:23:48 +0800 Subject: [PATCH 141/180] Generate bilinear protostr via Linux --- .../tests/configs/protostr/test_bilinear_interp.protostr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr index d4cbfc2389ac5..13d0d477eb58f 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_bilinear_interp.protostr @@ -83,7 +83,7 @@ parameters { name: "___conv_0__.w0" size: 144 initial_mean: 0.0 - initial_std: 0.471404522657 + initial_std: 0.471404520791 initial_strategy: 0 initial_smart: false } From 8d4c453bbb7980491d539bbe574d5e729c90a097 Mon Sep 17 00:00:00 2001 From: Haonan Date: Wed, 9 Nov 2016 17:24:37 -0800 Subject: [PATCH 142/180] set mixedlayer output size according to input operator (#414) * set mixedlayer output size according to input operator * change from num_channel to num_channels for conv_operator (the old one is really misleading because all the others are num_channels) * also changed the arg name in projections.py --- .../paddle/trainer_config_helpers/layers.py | 43 ++++++++++--------- .../tests/configs/projections.py | 2 +- .../tests/layers_test_config.py | 4 +- 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 49f0ff3289db7..6b5d39a47158b 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ 
b/python/paddle/trainer_config_helpers/layers.py @@ -590,7 +590,7 @@ def __enter__(self): def __exit__(self, *args, **kwargs): del args, kwargs # unused parameter to suppress warning assert len(self.inputs) != 0 - MixedLayer( + ml = MixedLayer( name=self.name, size=self.size, active_type=self.activation.name, @@ -598,6 +598,9 @@ def __exit__(self, *args, **kwargs): inputs=self.inputs, **ExtraLayerAttribute.to_kwargs(self.layer_attr) ) + # update the size which might be computed inside MixedLayer + # according to the operator's output size + self.size = ml.config.size @wrap_name_default("mixed") @@ -2045,7 +2048,7 @@ def __reduce_concat_type__(a, b): if layer_type == LayerType.CONCAT_LAYER: assert not bias_attr - + Layer( name=name, type=layer_type, inputs=[x.name for x in input] if is_concat_layer else input, @@ -2623,7 +2626,7 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None): assert isinstance(input1, LayerOutput) assert isinstance(input2, LayerOutput) Layer(name=name, - type="out_prod", + type=LayerType.OUT_PROD_LAYER, inputs=[input1.name, input2.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name=name, @@ -2790,7 +2793,7 @@ def __real_step__(*args): def __cost_input__(input, label, weight=None): """ - inputs and parents for cost layers. + inputs and parents for cost layers. """ ipts = [Input(input.name), Input(label.name)] parents = [input, label] @@ -2799,7 +2802,7 @@ def __cost_input__(input, label, weight=None): ipts.append(Input(weight.name)) parents.append(weight) return ipts, parents - + @wrap_name_default() @layer_support() @@ -2884,7 +2887,7 @@ def __add_evaluator__(e): def conv_operator(img, filter, filter_size, num_filters, - num_channel=None, stride=1, padding=0, + num_channels=None, stride=1, padding=0, filter_size_y=None, stride_y=None, padding_y=None): """ Different from img_conv_layer, conv_op is an Operator, which can be used @@ -2914,8 +2917,8 @@ def conv_operator(img, filter, filter_size, num_filters, :type filter_size_y: int :param num_filters: channel of output data. :type num_filters: int - :param num_channel: channel of input data. - :type num_channel: int + :param num_channels: channel of input data. + :type num_channels: int :param stride: The x dimension of the stride. :type stride: int :param stride_y: The y dimension of the stride. @@ -2934,19 +2937,19 @@ def conv_operator(img, filter, filter_size, num_filters, if padding_y is None: padding_y = padding - if num_channel is None: - num_channel = img.num_filters + if num_channels is None: + num_channels = img.num_filters assert isinstance(filter, LayerOutput) if filter.size is not None: - filter.size = filter_size * filter_size_y * num_filters * num_channel + filter.size = filter_size * filter_size_y * num_filters * num_channels op = ConvOperator(input_layer_names=[img.name, filter.name], num_filters=num_filters, conv_conf=Conv(filter_size=filter_size, padding=padding, stride=stride, - channels=num_channel, + channels=num_channels, filter_size_y=filter_size_y, padding_y=padding_y, stride_y=stride_y, @@ -2986,8 +2989,8 @@ def conv_projection(input, filter_size, num_filters, :type filter_size_y: int :param num_filters: channel of output data. :type num_filters: int - :param num_channel: channel of input data. - :type num_channel: int + :param num_channels: channel of input data. + :type num_channels: int :param stride: The x dimension of the stride. :type stride: int :param stride_y: The y dimension of the stride. 
@@ -3478,15 +3481,15 @@ def maxout_layer(input, - Input: output of a conv layer. - Output: feature map size same as input. Channel is (input channel) / groups. - So groups should be larger than 1, and the num of channels should be able + So groups should be larger than 1, and the num of channels should be able to devided by groups. - Please refer to Paper: + Please refer to Paper: - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf - Multi-digit Number Recognition from Street View \ Imagery using Deep Convolutional Neural Networks: \ https://arxiv.org/pdf/1312.6082v4.pdf - + The simple usage is: .. code-block:: python @@ -3731,9 +3734,9 @@ def nce_layer(input, label, num_classes, weight=None, :param weight: weight layer, can be None(default) :type weight: LayerOutput :param num_classes: number of classes. - :type num_classes: int + :type num_classes: int :param num_neg_samples: number of negative samples. Default is 10. - :type num_neg_samples: int + :type num_neg_samples: int :param neg_distribution: The distribution for generating the random negative labels. A uniform distribution will be used if not provided. If not None, its length must be equal to num_classes. @@ -3754,7 +3757,7 @@ def nce_layer(input, label, num_classes, weight=None, assert isinstance(neg_distribution, collections.Sequence) assert len(neg_distribution) == num_classes assert sum(neg_distribution) == 1 - + ipts_for_layer = [] parents = [] for each_input in input: diff --git a/python/paddle/trainer_config_helpers/tests/configs/projections.py b/python/paddle/trainer_config_helpers/tests/configs/projections.py index 4066c5bc6e0f0..51194b5a2a8ae 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/projections.py +++ b/python/paddle/trainer_config_helpers/tests/configs/projections.py @@ -35,7 +35,7 @@ with mixed_layer() as m7: m7 += conv_operator(img=img, filter=flt, num_filters=64, - num_channel=1, filter_size=3) + num_channels=1, filter_size=3) end = mixed_layer(input=[full_matrix_projection(input=m5), trans_full_matrix_projection(input=m6), diff --git a/python/paddle/trainer_config_helpers/tests/layers_test_config.py b/python/paddle/trainer_config_helpers/tests/layers_test_config.py index faaab9107d8fb..26be84f122180 100644 --- a/python/paddle/trainer_config_helpers/tests/layers_test_config.py +++ b/python/paddle/trainer_config_helpers/tests/layers_test_config.py @@ -29,9 +29,11 @@ filter=y1, filter_size=1, num_filters=5, - num_channel=5, + num_channels=5, stride=1)]) +assert z1.size > 0 + y2 = fc_layer(input=y, size=15) cos1 = cos_sim(a=x1, b=y1) From 880774d1418d53a3f4cf103891bc77ebed05e685 Mon Sep 17 00:00:00 2001 From: Haonan Date: Wed, 9 Nov 2016 17:48:17 -0800 Subject: [PATCH 143/180] change the act.name for LinearActivation() to "linear" so that it won't fail in hl_activetype; also fix the hasinputsset in submodel --- python/paddle/trainer/config_parser.py | 10 +++++----- python/paddle/trainer_config_helpers/activations.py | 8 ++++---- python/paddle/trainer_config_helpers/layers.py | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 73631602a92be..2abbf565039e4 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -218,7 +218,7 @@ def Inputs(*args): @config_func def HasInputsSet(): - return len(g_config.model_config.input_layer_names) != 0 + return len(g_current_submodel.input_layer_names) != 0 # Define the name of the output 
layers of the NeuralNetwork. @@ -1120,14 +1120,14 @@ def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.output_x = 0 else: block_expand_conf.output_x = cnn_output_size( - block_expand.img_size_x, block_expand.block_x, + block_expand.img_size_x, block_expand.block_x, block_expand.padding_x, block_expand.stride_x, False) if block_expand_conf.img_size_y == 0: block_expand_conf.output_y = 0 else: block_expand_conf.output_y = cnn_output_size( - block_expand.img_size_y, block_expand.block_y, + block_expand.img_size_y, block_expand.block_y, block_expand.padding_y, block_expand.stride_y, False) def parse_maxout(maxout, input_layer_name, maxout_conf): @@ -1135,7 +1135,7 @@ def parse_maxout(maxout, input_layer_name, maxout_conf): maxout_conf.groups = maxout.groups maxout_conf.img_size_x = maxout.img_size_x maxout_conf.img_size_y = maxout.img_size_y - + # Define an evaluator @config_func def Evaluator( @@ -1773,7 +1773,7 @@ def __init__( self.config.inputs[0].maxout_conf) maxout_conf = self.config.inputs[0].maxout_conf self.set_layer_size(g_layer_map[input_layer.name].size / maxout_conf.groups) - + # key: cost type # value: cost class g_cost_map = {} diff --git a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py index ad5cdc0a0eb13..29b5437446d78 100644 --- a/python/paddle/trainer_config_helpers/activations.py +++ b/python/paddle/trainer_config_helpers/activations.py @@ -23,9 +23,9 @@ class BaseActivation(object): """ - A mark for activation class. + A mark for activation class. Each activation inherit BaseActivation, which has two parameters. - + :param name: activation name in paddle config. :type name: basestring :param support_hppl: True if supported by hppl. HPPL is a library used by paddle @@ -104,7 +104,7 @@ class IdentityActivation(BaseActivation): Just do nothing for output both forward/backward. """ - def __init__(self): BaseActivation.__init__(self, '', False) + def __init__(self): BaseActivation.__init__(self, 'linear', False) LinearActivation = IdentityActivation @@ -194,7 +194,7 @@ def __init__(self): BaseActivation.__init__(self, 'square', False) class ExpActivation(BaseActivation): """ Exponential Activation. - + .. math:: f(z) = e^z. """ diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 6b5d39a47158b..bf1d0631aa46a 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1657,7 +1657,7 @@ def img_pool_layer(input, pool_size, name=None, :type pool_size_y: int|None :param num_channels: number of input channel. :type num_channels: int - :param pool_type: pooling type. MaxPooling or AveragePooling. Default is + :param pool_type: pooling type. MaxPooling or AvgPooling. Default is MaxPooling. :type pool_type: BasePoolingType :param stride: stride width of pooling. 
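The patches above consolidate the convolution size arithmetic into shared helpers: cnn_output_size/cnn_image_size on the Python side and outputSize/imageSize in paddle/math/MathUtils. The snippet below is a standalone sketch of that arithmetic rather than code from the series; the function names are local to the example and only mirror the helpers in the diffs.

import math


def conv_output_size(img_size, filter_size, padding, stride, caffe_mode=True):
    """Forward direction, mirroring cnn_output_size(): caffe_mode floors the
    division (the Caffe/cuDNN convention), otherwise it is ceiled."""
    span = img_size + 2 * padding - filter_size
    if caffe_mode:
        return 1 + int(math.floor(span / float(stride)))
    return 1 + int(math.ceil(span / float(stride)))


def conv_image_size(output_size, filter_size, padding, stride, caffe_mode=True):
    """Inverse direction, mirroring cnn_image_size(): used by the transposed
    (deconv) layer to recover the input image size from its output size."""
    if caffe_mode:
        return (output_size - 1) * stride + filter_size - 2 * padding
    return (output_size - 2) * stride + filter_size - 2 * padding + 1


# A 16x16 input with a 4x4 filter, stride 2 and padding 1 gives an 8x8 output,
# and the deconv direction maps 8 back to 16, so the two helpers invert each
# other for this configuration.
out = conv_output_size(16, 4, padding=1, stride=2)
assert out == 8
assert conv_image_size(out, 4, padding=1, stride=2) == 16
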
From f27ff4d8a4e1370ceff5d3082bef8b712b1b23cb Mon Sep 17 00:00:00 2001 From: liaogang Date: Thu, 10 Nov 2016 11:06:02 +0800 Subject: [PATCH 144/180] Revise code --- paddle/gserver/tests/test_LayerGrad.cpp | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index c1f7876c1722f..55a6f66ac5171 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -187,15 +187,13 @@ TEST(Layer, BilinearInterpLayer) { bilinear->set_img_size_y(32); bilinear->set_num_channels(4); - bilinear->set_out_size_x(32); - bilinear->set_out_size_y(32); - testLayerGrad(config, "bilinear_interp", 10, false, false); - testLayerGrad(config, "bilinear_interp", 10, false, true); - - bilinear->set_out_size_x(64); - bilinear->set_out_size_y(64); - testLayerGrad(config, "bilinear_interp", 10, false, false); - testLayerGrad(config, "bilinear_interp", 10, false, true); + for (auto useGpu : {false, true}) { + for (auto outSize : {32, 64}) { + bilinear->set_out_size_x(outSize); + bilinear->set_out_size_y(outSize); + testLayerGrad(config, "bilinear_interp", 10, false, useGpu); + } + } } TEST(Layer, concat) { From a45e6c95bd66e32a2d3a6e89172f1d198f2a9a81 Mon Sep 17 00:00:00 2001 From: qijun Date: Thu, 10 Nov 2016 07:20:54 +0000 Subject: [PATCH 145/180] use yapf to format python code, add style config file --- .style.yapf | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .style.yapf diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 0000000000000..c9cdc2e790a40 --- /dev/null +++ b/.style.yapf @@ -0,0 +1,5 @@ +[style] +based_on_style = google +indent_width = 4 +spaces_before_comment = 4 +split_before_logical_operator = true From 64b7561ce68440b168ba7bc20471cd41a2c63f58 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 10 Nov 2016 22:37:47 +0800 Subject: [PATCH 146/180] Add checkout name for Dockerfile * Because in dockerhub, we cannot set the `docker build `running directory, we could only use `git clone` command to get the latest code if we put `Dockerfile` in subdirectory * But the `git clone` will checkout the default branch only, so here we add a `ENV` in Dockerfile to checkout special branch or tag in git repo. We could change it to `V0.9.0` tag when it release. 
--- paddle/scripts/docker/Dockerfile.cpu | 1 + paddle/scripts/docker/Dockerfile.cpu-demo | 1 + paddle/scripts/docker/Dockerfile.cpu-devel | 1 + paddle/scripts/docker/Dockerfile.cpu-noavx | 1 + paddle/scripts/docker/Dockerfile.cpu-noavx-demo | 1 + paddle/scripts/docker/Dockerfile.cpu-noavx-devel | 1 + paddle/scripts/docker/Dockerfile.gpu | 1 + paddle/scripts/docker/Dockerfile.gpu-demo | 1 + paddle/scripts/docker/Dockerfile.gpu-devel | 1 + paddle/scripts/docker/Dockerfile.gpu-noavx | 1 + paddle/scripts/docker/Dockerfile.gpu-noavx-demo | 1 + paddle/scripts/docker/Dockerfile.gpu-noavx-devel | 1 + paddle/scripts/docker/Dockerfile.m4 | 1 + paddle/scripts/docker/build.sh | 1 + 14 files changed, 14 insertions(+) diff --git a/paddle/scripts/docker/Dockerfile.cpu b/paddle/scripts/docker/Dockerfile.cpu index 3aa8cb1a3a869..a833c69c66900 100644 --- a/paddle/scripts/docker/Dockerfile.cpu +++ b/paddle/scripts/docker/Dockerfile.cpu @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=OFF ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.cpu-demo b/paddle/scripts/docker/Dockerfile.cpu-demo index 22c0b9e701bfc..1fda1e472b290 100644 --- a/paddle/scripts/docker/Dockerfile.cpu-demo +++ b/paddle/scripts/docker/Dockerfile.cpu-demo @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=ON ENV WITH_DEMO=ON diff --git a/paddle/scripts/docker/Dockerfile.cpu-devel b/paddle/scripts/docker/Dockerfile.cpu-devel index b40f3c0a30ba3..66bdc978ddcb4 100644 --- a/paddle/scripts/docker/Dockerfile.cpu-devel +++ b/paddle/scripts/docker/Dockerfile.cpu-devel @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=ON ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.cpu-noavx b/paddle/scripts/docker/Dockerfile.cpu-noavx index 5cb5ac7dc4e68..d0ba30e55afb2 100644 --- a/paddle/scripts/docker/Dockerfile.cpu-noavx +++ b/paddle/scripts/docker/Dockerfile.cpu-noavx @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=OFF ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.cpu-noavx-demo b/paddle/scripts/docker/Dockerfile.cpu-noavx-demo index bec401960efb2..28439b4bdfab4 100644 --- a/paddle/scripts/docker/Dockerfile.cpu-noavx-demo +++ b/paddle/scripts/docker/Dockerfile.cpu-noavx-demo @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=ON ENV WITH_DEMO=ON diff --git a/paddle/scripts/docker/Dockerfile.cpu-noavx-devel b/paddle/scripts/docker/Dockerfile.cpu-noavx-devel index b7c3eaed97aa5..eb4739d6dc742 100644 --- a/paddle/scripts/docker/Dockerfile.cpu-noavx-devel +++ b/paddle/scripts/docker/Dockerfile.cpu-noavx-devel @@ -1,6 +1,7 @@ FROM ubuntu:14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=OFF ENV IS_DEVEL=ON ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.gpu b/paddle/scripts/docker/Dockerfile.gpu index b7f5b6d93df50..fa61cfeec851f 100644 --- a/paddle/scripts/docker/Dockerfile.gpu +++ b/paddle/scripts/docker/Dockerfile.gpu @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV 
IS_DEVEL=OFF ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.gpu-demo b/paddle/scripts/docker/Dockerfile.gpu-demo index 2d1411de09f2a..4f5417c1af072 100644 --- a/paddle/scripts/docker/Dockerfile.gpu-demo +++ b/paddle/scripts/docker/Dockerfile.gpu-demo @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV IS_DEVEL=ON ENV WITH_DEMO=ON diff --git a/paddle/scripts/docker/Dockerfile.gpu-devel b/paddle/scripts/docker/Dockerfile.gpu-devel index eb13f4304fa06..37cfced190886 100644 --- a/paddle/scripts/docker/Dockerfile.gpu-devel +++ b/paddle/scripts/docker/Dockerfile.gpu-devel @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV IS_DEVEL=ON ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.gpu-noavx b/paddle/scripts/docker/Dockerfile.gpu-noavx index 0944b0e152af3..95fb125b799e8 100644 --- a/paddle/scripts/docker/Dockerfile.gpu-noavx +++ b/paddle/scripts/docker/Dockerfile.gpu-noavx @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV IS_DEVEL=OFF ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.gpu-noavx-demo b/paddle/scripts/docker/Dockerfile.gpu-noavx-demo index 2da2a55d696a3..b5fbe4b941d68 100644 --- a/paddle/scripts/docker/Dockerfile.gpu-noavx-demo +++ b/paddle/scripts/docker/Dockerfile.gpu-noavx-demo @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV IS_DEVEL=ON ENV WITH_DEMO=ON diff --git a/paddle/scripts/docker/Dockerfile.gpu-noavx-devel b/paddle/scripts/docker/Dockerfile.gpu-noavx-devel index 9f551462f206a..531c8ec7ae30c 100644 --- a/paddle/scripts/docker/Dockerfile.gpu-noavx-devel +++ b/paddle/scripts/docker/Dockerfile.gpu-noavx-devel @@ -1,6 +1,7 @@ FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04 MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=ON ENV IS_DEVEL=ON ENV WITH_DEMO=OFF diff --git a/paddle/scripts/docker/Dockerfile.m4 b/paddle/scripts/docker/Dockerfile.m4 index 129d21b36abd9..57c8655844133 100644 --- a/paddle/scripts/docker/Dockerfile.m4 +++ b/paddle/scripts/docker/Dockerfile.m4 @@ -1,6 +1,7 @@ FROM PADDLE_BASE_IMAGE MAINTAINER PaddlePaddle Dev Team COPY build.sh /root/ +ENV GIT_CHECKOUT=develop ENV WITH_GPU=PADDLE_WITH_GPU ENV IS_DEVEL=PADDLE_IS_DEVEL ENV WITH_DEMO=PADDLE_WITH_DEMO diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 33689e736cda7..ec5f3bd967d35 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -23,6 +23,7 @@ fi cd ~ git clone https://github.com/baidu/Paddle.git paddle cd paddle +git checkout ${GIT_CHECKOUT} mkdir build cd build cmake .. -DWITH_DOC=OFF -DWITH_GPU=${WITH_GPU} -DWITH_SWIG_PY=ON\ From 36fa251756f583b9c40f067717990fff062bf9ae Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Thu, 10 Nov 2016 10:07:13 -0800 Subject: [PATCH 147/180] '*' operator overload for LayerOutput Making '*' support the multiplication between a scalar and LayerOutput Also changing '+' to support adding between a vector and a scalar. 
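Applied in a model config, the new overloads read as ordinary arithmetic. The following is a usage sketch in the spirit of the math_ops.py test added by this patch; it assumes a PaddlePaddle build that contains this change, and the layer names and sizes are illustrative only.

from paddle.trainer_config_helpers import *

settings(learning_rate=1e-3, batch_size=1000)

x = data_layer(name='data', size=100)
w = data_layer(name='weight', size=1)

# A scalar on either side of '*' becomes a slope_intercept layer.
y = 2 * x
y = y * 3

# '*' between two LayerOutputs requires one side to have size 1 and is
# lowered to a scaling_layer.
y = y * w
y = w * y

# '+' between a size-100 layer and a size-1 layer broadcasts the size-1
# operand through repeat_layer before the element-wise add.
y = y + w

outputs(y)
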
Change-Id: I7daf35590dc2b2f855a29d9ef43ac57979442e0f --- doc/ui/api/trainer_config_helpers/layers.rst | 6 + python/paddle/trainer/config_parser.py | 2 +- .../paddle/trainer_config_helpers/__init__.py | 3 + .../paddle/trainer_config_helpers/layers.py | 146 +++++++++++++----- python/paddle/trainer_config_helpers/math.py | 43 +++++- .../tests/configs/math_ops.py | 8 +- .../tests/configs/protostr/math_ops.protostr | 135 +++++++++++++++- 7 files changed, 294 insertions(+), 49 deletions(-) diff --git a/doc/ui/api/trainer_config_helpers/layers.rst b/doc/ui/api/trainer_config_helpers/layers.rst index c78682423e448..a98e8f2f55c58 100644 --- a/doc/ui/api/trainer_config_helpers/layers.rst +++ b/doc/ui/api/trainer_config_helpers/layers.rst @@ -254,6 +254,12 @@ expand_layer :members: expand_layer :noindex: +repeat_layer +------------ +.. automodule:: paddle.trainer_config_helpers.layers + :members: repeat_layer + :noindex: + Math Layers =========== diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index c55579c960ecc..06ef35544590a 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -3015,7 +3015,7 @@ def Layer( layer_func = layers.get(type) config_assert(layer_func, "layer type '%s' not supported." % type) - layer_func(name, **xargs) + return layer_func(name, **xargs) @config_func def ParameterHook( diff --git a/python/paddle/trainer_config_helpers/__init__.py b/python/paddle/trainer_config_helpers/__init__.py index 451b9ac3396ea..adebebba2523f 100644 --- a/python/paddle/trainer_config_helpers/__init__.py +++ b/python/paddle/trainer_config_helpers/__init__.py @@ -20,3 +20,6 @@ from networks import * from optimizers import * from attrs import * + +# This will enable operator overload for LayerOutput +import math diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 59822180883c9..bd8e9f07b6aeb 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -31,6 +31,7 @@ __all__ = ["full_matrix_projection", "AggregateLevel", "ExpandLevel", "identity_projection", "dotmul_projection", "dotmul_operator", + "repeat_layer", "table_projection", "mixed_layer", "data_layer", "embedding_layer", "fc_layer", "grumemory", "pooling_layer", "lstmemory", "last_seq", "first_seq", @@ -99,6 +100,7 @@ class LayerType(object): SCALING_LAYER = 'scaling' TRANS_LAYER = 'trans' OUT_PROD_LAYER = 'out_prod' + FEATURE_MAP_EXPAND_LAYER = 'featmap_expand' MEMORY = 'memory' MAXID_LAYER = 'maxid' @@ -181,6 +183,7 @@ def __init__(self, name, layer_type, parents=None, activation=None, reverse=None): assert isinstance(name, basestring) assert isinstance(layer_type, basestring) + assert size is not None assert LayerType.is_layer_type(layer_type) self.name = name self.layer_type = layer_type @@ -1209,6 +1212,48 @@ def expand_layer(input, expand_as, parents=[input, expand_as]) +@wrap_name_default() +@layer_support() +def repeat_layer(input, num_repeats, + name=None, + layer_attr=None): + """ + A layer for repeating the input for num_repeats times. This is equivalent + to apply concat_layer() with num_repeats same input. + + .. math:: + y = [x, x, \cdots, x] + + The example usage is: + + .. code-block:: python + + expand = repeat_layer(layer, 4) + + :param input: Input layer + :type input: LayerOutput + :param num_repeats: Repeat the input so many times + :type num_repeats: int + :param name: Layer name. 
+ :type name: basestring + :param layer_attr: extra layer attributes. + :type layer_attr: ExtraLayerAttribute. + :return: LayerOutput object. + :rtype: LayerOutput + """ + + l = Layer( + inputs=[input.name], + name=name, + num_filters=num_repeats, + type=LayerType.FEATURE_MAP_EXPAND_LAYER, + **ExtraAttr.to_kwargs(layer_attr) + ) + return LayerOutput(name=name, + size=l.config.size, + layer_type=LayerType.FEATURE_MAP_EXPAND_LAYER, + parents=[input]) + @wrap_name_default() @layer_support() def interpolation_layer(input, weight, name=None, layer_attr=None): @@ -1296,7 +1341,7 @@ def bilinear_interp_layer(input, assert out_size_x > 0 and out_size_y > 0 assert input.num_filters is not None num_channels = input.num_filters - Layer(name=name, + l = Layer(name=name, inputs=Input(input.name, bilinear_interp=BilinearInterp(out_size_x=out_size_x, out_size_y=out_size_y, @@ -1304,7 +1349,7 @@ def bilinear_interp_layer(input, type=LayerType.BILINEAR_INTERP_LAYER, **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input], - num_filters=num_channels) + num_filters=num_channels, size=l.config.size) @wrap_name_default() @layer_support() @@ -1482,7 +1527,7 @@ def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None): inputs=[a.name, b.name], **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b]) + return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b], size=size) @wrap_name_default() @@ -1545,7 +1590,7 @@ def hsigmoid(input, label, num_classes, name=None, bias_attr=None, ipts_for_layer.append(label.name) parents.append(label) - Layer( + l = Layer( name=name, type=LayerType.HSIGMOID, num_classes=num_classes, @@ -1553,7 +1598,8 @@ def hsigmoid(input, label, num_classes, name=None, bias_attr=None, inputs=ipts_for_layer, **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.HSIGMOID, parents=parents) + return LayerOutput(name, LayerType.HSIGMOID, parents=parents, + size=l.config.size) @wrap_name_default("conv") @@ -1671,7 +1717,7 @@ def img_conv_layer(input, filter_size, num_filters, lt = LayerType.CONVTRANS_LAYER if trans else LayerType.CONV_LAYER - Layer( + l = Layer( name=name, inputs=Input(input.name, conv=Conv( filter_size=filter_size, padding=padding, stride=stride, @@ -1687,7 +1733,8 @@ def img_conv_layer(input, filter_size, num_filters, **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, lt, parents=[input], - activation=act, num_filters=num_filters) + activation=act, num_filters=num_filters, + size=l.config.size) @wrap_name_default("pool") @@ -1750,7 +1797,7 @@ def img_pool_layer(input, pool_size, name=None, stride_y = stride if stride_y is None else stride_y padding_y = padding if padding_y is None else padding_y - Layer( + l = Layer( name=name, type=LayerType.POOL_LAYER, inputs=[Input(input.name, @@ -1769,7 +1816,7 @@ def img_pool_layer(input, pool_size, name=None, **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.POOL_LAYER, parents=[input], - num_filters=num_channels) + num_filters=num_channels, size=l.config.size) def __img_norm_layer__(name, input, size, norm_type, scale, power, @@ -1778,7 +1825,7 @@ def __img_norm_layer__(name, input, size, norm_type, scale, power, assert input.num_filters is not None num_channels = input.num_filters - Layer( + l = Layer( name=name, type=LayerType.NORM_LAYER, inputs=Input( input.name, norm=Norm(norm_type=norm_type, channels=num_channels, size=size, @@ -1788,7 
+1835,8 @@ def __img_norm_layer__(name, input, size, norm_type, scale, power, **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, layer_type=LayerType.NORM_LAYER, parents=[input], - num_filters=num_channels, img_norm_type=norm_type) + num_filters=num_channels, img_norm_type=norm_type, + size=l.config.size) @wrap_name_default("crmnorm") @@ -1913,7 +1961,7 @@ def batch_norm_layer(input, act=None, name=None, num_channels=None, num_channels = input.size assert (batch_norm_type is None) or (batch_norm_type == "batch_norm") or \ (batch_norm_type == "cudnn_batch_norm") - Layer( + l = Layer( name=name, inputs=Input(input.name, image=Image(channels=num_channels), @@ -1929,7 +1977,8 @@ def batch_norm_layer(input, act=None, name=None, num_channels=None, return LayerOutput(name=name, layer_type=LayerType.BATCH_NORM_LAYER, parents=[input], activation=act, - num_filters=num_channels) + num_filters=num_channels, + size=l.config.size) @wrap_name_default() @@ -2034,7 +2083,7 @@ def addto_layer(input, act=None, name=None, bias_attr=None, if each_input.num_filters is not None: num_filters = each_input.num_filters - Layer( + l = Layer( name=name, type=LayerType.ADDTO_LAYER, inputs=ipts_for_layer, bias=ParamAttr.to_bias(bias_attr), active_type=act.name, @@ -2042,7 +2091,8 @@ def addto_layer(input, act=None, name=None, bias_attr=None, ) return LayerOutput(name, LayerType.ADDTO_LAYER, parents=input, - activation=act, num_filters=num_filters) + activation=act, num_filters=num_filters, + size=l.config.size) @wrap_act_default(act=IdentityActivation()) @@ -2651,13 +2701,14 @@ def maxid_layer(input, name=None, layer_attr=None): """ assert isinstance(input, LayerOutput) - Layer(name=name, + l = Layer(name=name, type='maxid', inputs=[input.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name=name, layer_type=LayerType.MAXID_LAYER, - parents=[input]) + parents=[input], + size=l.config.size) @wrap_name_default() @@ -2686,13 +2737,14 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None): assert isinstance(input1, LayerOutput) assert isinstance(input2, LayerOutput) - Layer(name=name, + l = Layer(name=name, type=LayerType.OUT_PROD_LAYER, inputs=[input1.name, input2.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name=name, layer_type=LayerType.OUT_PROD_LAYER, - parents=[input1, input2]) + parents=[input1, input2], + size=l.config.size) @wrap_name_default() @@ -2721,13 +2773,14 @@ def eos_layer(input, eos_id, name=None, layer_attr=None): :return: LayerOutput object. 
:rtype: LayerOutput """ - Layer(name=name, + l = Layer(name=name, type=LayerType.EOSID_LAYER, eos_id=eos_id, inputs=[input.name], **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name=name, layer_type=LayerType.EOSID_LAYER, - parents=[input]) + parents=[input], + size=l.config.size) @wrap_name_default() @@ -2892,7 +2945,7 @@ def regression_cost(input, label, weight=None, name=None, Layer(inputs=ipts, type="square_error", name=name, **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.COST, parents=parents) + return LayerOutput(name, LayerType.COST, parents=parents, size=1) @wrap_name_default("cost") @@ -2944,7 +2997,7 @@ def __add_evaluator__(e): for each_evaluator in evaluator: __add_evaluator__(each_evaluator) - return LayerOutput(name, LayerType.COST, parents=parents) + return LayerOutput(name, LayerType.COST, parents=parents, size=1) def conv_operator(img, filter, filter_size, num_filters, @@ -3326,13 +3379,14 @@ def sampling_id_layer(input, name=None, layer_attr=None): :return: LayerOutput object. :rtype: LayerOutput """ - Layer( + l = Layer( name=name, type=LayerType.SAMPLING_ID_LAYER, inputs=[Input(input.name)], **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.SAMPLING_ID_LAYER, input) + return LayerOutput(name, LayerType.SAMPLING_ID_LAYER, input, + size=l.config.size) @wrap_name_default() @@ -3373,7 +3427,8 @@ def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0, inputs=[Input(input.name)], **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.SLOPE_INTERCEPT_LAYER, input) + return LayerOutput(name, LayerType.SLOPE_INTERCEPT_LAYER, input, + size=input.size) @wrap_name_default() @@ -3512,7 +3567,7 @@ def block_expand_layer(input, if num_channels is None: assert input.num_filters is not None num_channels = input.num_filters - Layer(name=name, + l = Layer(name=name, inputs=Input(input.name, block_expand=BlockExpand(channels=num_channels, block_x=block_x, @@ -3525,7 +3580,8 @@ def block_expand_layer(input, **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.BLOCK_EXPAND, parents=[input]) + return LayerOutput(name, LayerType.BLOCK_EXPAND, parents=[input], + size=l.config.size) @wrap_name_default() @@ -3586,13 +3642,14 @@ def maxout_layer(input, assert input.num_filters is not None num_channels = input.num_filters assert num_channels % groups == 0 - Layer(name=name, + l = Layer(name=name, inputs=Input(input.name, maxout=MaxOut(channels=num_channels, groups=groups)), type=LayerType.MAXOUT, **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.MAXOUT, parents=[input]) + return LayerOutput(name, LayerType.MAXOUT, parents=[input], + size=l.config.size) @wrap_name_default() @@ -3718,7 +3775,10 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, parents = [input, label] if weight is not None: parents.append(weight) - return LayerOutput(name, LayerType.CRF_LAYER, parents, size=size) + # The size for LayerOutput means the dimension of the output. + # It's different from the meaning of crf layer, which is the number of + # classes. 
+ return LayerOutput(name, LayerType.CRF_LAYER, parents, size=1) @wrap_name_default() @@ -3766,7 +3826,10 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None, parents = [input] if label is not None: parents.append(label) - return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=size) + # The size for LayerOutput means the dimension of the output. + # It's different from the meaning of crf layer, which is the number of + # classes. + return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1) @wrap_bias_attr_default(has_bias=True) @wrap_name_default() @@ -3834,7 +3897,7 @@ def nce_layer(input, label, num_classes, weight=None, ipts_for_layer.append(weight.name) parents.append(weight) - Layer( + l = Layer( name=name, type=LayerType.NCE_LAYER, num_classes=num_classes, @@ -3844,7 +3907,8 @@ def nce_layer(input, label, num_classes, weight=None, bias=ParamAttr.to_bias(bias_attr), **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.NCE_LAYER, parents=parents) + return LayerOutput(name, LayerType.NCE_LAYER, parents=parents, + size=l.config.size) """ following are cost Layers. @@ -3919,7 +3983,7 @@ def rank_cost(left, right, label, weight=None, name=None, coeff=1.0, layer_attr= **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.RANK_COST, parents=parents) + return LayerOutput(name, LayerType.RANK_COST, parents=parents, size=1) @wrap_name_default() @@ -3971,7 +4035,8 @@ def lambda_cost(input, score, name, NDCG_num=5, max_sort_size=-1, layer_attr=Non **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.LAMBDA_COST, parents=[input, score]) + return LayerOutput(name, LayerType.LAMBDA_COST, parents=[input, score], + size=1) @wrap_name_default() @@ -4006,7 +4071,8 @@ def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None): coeff=coeff, **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.CROSS_ENTROPY, parents=[input, label]) + return LayerOutput(name, LayerType.CROSS_ENTROPY, parents=[input, label], + size=1) @wrap_name_default() @@ -4048,7 +4114,7 @@ def cross_entropy_with_selfnorm(input, label, name=None, coeff=1.0, return LayerOutput(name, LayerType.CROSS_ENTROPY_WITH_SELFNORM, - parents=[input, label]) + parents=[input, label], size=1) @wrap_name_default() @@ -4083,7 +4149,7 @@ def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None): coeff=coeff, **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.HUBER, parents=[input, label]) + return LayerOutput(name, LayerType.HUBER, parents=[input, label], size=1) @wrap_name_default() @@ -4126,4 +4192,4 @@ def multi_binary_label_cross_entropy(input, label, name=None, coeff=1.0, **ExtraLayerAttribute.to_kwargs(layer_attr) ) return LayerOutput(name, LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY, - parents=[input, label]) + parents=[input, label], size=1) diff --git a/python/paddle/trainer_config_helpers/math.py b/python/paddle/trainer_config_helpers/math.py index e35849b77ac53..7d7bb2914859f 100644 --- a/python/paddle/trainer_config_helpers/math.py +++ b/python/paddle/trainer_config_helpers/math.py @@ -13,10 +13,11 @@ # limitations under the License. 
from .layers import LayerOutput, mixed_layer, identity_projection, \ - slope_intercept_layer + slope_intercept_layer, scaling_layer, repeat_layer from .attrs import is_compatible_with from .default_decorators import * import activations as act +from paddle.trainer.config_parser import logger __all__ = [] @@ -40,7 +41,21 @@ def op(input, name=None): def add(layeroutput, other): if is_compatible_with(other, float): return slope_intercept_layer(input=layeroutput, intercept=other) - assert isinstance(other, LayerOutput) + if not isinstance(other, LayerOutput): + logger.fatal("LayerOutput can only be added with" + " another LayerOutput or a number") + if layeroutput.size == other.size: + return mixed_layer(input=[identity_projection(input=layeroutput), + identity_projection(input=other)]) + if other.size != 1 and layeroutput.size != 1: + logger.fatal("Two LayerOutput can be added only if they have equal size" + " or one of their sizes is 1. sizes are %s and %s" % + (layeroutput.size, other.size)) + elif layeroutput.size == 1: + tmp = layeroutput + layeroutput = other + other = tmp + other = repeat_layer(other, layeroutput.size) return mixed_layer(input=[identity_projection(input=layeroutput), identity_projection(input=other)]) @@ -50,10 +65,11 @@ def add(layeroutput, other): def sub(layeroutput, other): if is_compatible_with(other, float): return slope_intercept_layer(input=layeroutput, intercept=other) - assert isinstance(other, LayerOutput) + if not isinstance(other, LayerOutput): + logger.fatal("LayerOutput can only be subtracted with" + " another Layeroutput or a number") neg = slope_intercept_layer(input=other, slope=-1.0) - return mixed_layer(input=[identity_projection(input=layeroutput), - identity_projection(input=neg)]) + return add(layeroutput, neg) LayerOutput.__sub__ = sub @@ -62,3 +78,20 @@ def rsub(layeroutput, other): return add(neg, other) LayerOutput.__rsub__ = rsub + +def mul(layeroutput, other): + if is_compatible_with(other, float): + return slope_intercept_layer(input=layeroutput, slope=other) + if not isinstance(other, LayerOutput): + logger.fatal("LayerOutput can only be multiplied with" + " another Layeroutput or a number") + elif layeroutput.size == 1: + return scaling_layer(input=other, weight=layeroutput) + elif other.size == 1: + return scaling_layer(input=layeroutput, weight=other) + else: + logger.fatal("At least one of the operand of '*' must be a number" + " or a LayerOutput with size=1") + +LayerOutput.__mul__ = mul +LayerOutput.__rmul__ = mul diff --git a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py index fe515b7029336..7c2770c616dc1 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/math_ops.py +++ b/python/paddle/trainer_config_helpers/tests/configs/math_ops.py @@ -19,6 +19,12 @@ y = y - x y = y - 2 y = 2 - y - +y = 2 * y +y = y * 3 +z= data_layer(name='data_2', size=1) +y = y * z +y = z * y +y = y + z +y = z + y outputs(y) diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr index 1767445c44bf5..da8da1b541f37 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/math_ops.protostr @@ -209,8 +209,129 @@ layers { slope: 1.0 intercept: 2 } +layers { + name: "__slope_intercept_layer_6__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs 
{ + input_layer_name: "__slope_intercept_layer_5__" + } + slope: 2 + intercept: 0.0 +} +layers { + name: "__slope_intercept_layer_7__" + type: "slope_intercept" + size: 100 + active_type: "" + inputs { + input_layer_name: "__slope_intercept_layer_6__" + } + slope: 3 + intercept: 0.0 +} +layers { + name: "data_2" + type: "data" + size: 1 + active_type: "" +} +layers { + name: "__scaling_layer_0__" + type: "scaling" + size: 100 + active_type: "" + inputs { + input_layer_name: "data_2" + } + inputs { + input_layer_name: "__slope_intercept_layer_7__" + } +} +layers { + name: "__scaling_layer_1__" + type: "scaling" + size: 100 + active_type: "" + inputs { + input_layer_name: "data_2" + } + inputs { + input_layer_name: "__scaling_layer_0__" + } +} +layers { + name: "__repeat_layer_0__" + type: "featmap_expand" + size: 100 + active_type: "" + inputs { + input_layer_name: "data_2" + } + num_filters: 100 +} +layers { + name: "__mixed_2__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__scaling_layer_1__" + proj_conf { + type: "identity" + name: "___mixed_2__.w0" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__repeat_layer_0__" + proj_conf { + type: "identity" + name: "___mixed_2__.w1" + input_size: 100 + output_size: 100 + } + } +} +layers { + name: "__repeat_layer_1__" + type: "featmap_expand" + size: 100 + active_type: "" + inputs { + input_layer_name: "data_2" + } + num_filters: 100 +} +layers { + name: "__mixed_3__" + type: "mixed" + size: 100 + active_type: "" + inputs { + input_layer_name: "__mixed_2__" + proj_conf { + type: "identity" + name: "___mixed_3__.w0" + input_size: 100 + output_size: 100 + } + } + inputs { + input_layer_name: "__repeat_layer_1__" + proj_conf { + type: "identity" + name: "___mixed_3__.w1" + input_size: 100 + output_size: 100 + } + } +} +input_layer_names: "data_2" input_layer_names: "data" -output_layer_names: "__slope_intercept_layer_5__" +output_layer_names: "__mixed_3__" sub_models { name: "root" layer_names: "data" @@ -228,8 +349,18 @@ sub_models { layer_names: "__slope_intercept_layer_3__" layer_names: "__slope_intercept_layer_4__" layer_names: "__slope_intercept_layer_5__" + layer_names: "__slope_intercept_layer_6__" + layer_names: "__slope_intercept_layer_7__" + layer_names: "data_2" + layer_names: "__scaling_layer_0__" + layer_names: "__scaling_layer_1__" + layer_names: "__repeat_layer_0__" + layer_names: "__mixed_2__" + layer_names: "__repeat_layer_1__" + layer_names: "__mixed_3__" + input_layer_names: "data_2" input_layer_names: "data" - output_layer_names: "__slope_intercept_layer_5__" + output_layer_names: "__mixed_3__" is_recurrent_layer_group: false } From 45f6e1abee60f0bb14c84b455ffe47a903c3c806 Mon Sep 17 00:00:00 2001 From: Haonan Date: Thu, 10 Nov 2016 16:29:23 -0800 Subject: [PATCH 148/180] change hlactivetype instead of act.name --- paddle/utils/Util.cpp | 2 +- python/paddle/trainer_config_helpers/activations.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/utils/Util.cpp b/paddle/utils/Util.cpp index 2cdff9d1aca92..b16d4314654ff 100644 --- a/paddle/utils/Util.cpp +++ b/paddle/utils/Util.cpp @@ -378,7 +378,7 @@ hl_activation_mode_t hlActiveType(const std::string& type) { return HL_ACTIVATION_RELU; } else if (type == "tanh") { return HL_ACTIVATION_TANH; - } else if (type == "linear") { + } else if (type == "linear" || type == "") { return HL_ACTIVATION_LINEAR; } else { LOG(FATAL) << "Do not support activation type " << type; diff --git 
a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py index 29b5437446d78..2202d0bf96976 100644 --- a/python/paddle/trainer_config_helpers/activations.py +++ b/python/paddle/trainer_config_helpers/activations.py @@ -104,7 +104,7 @@ class IdentityActivation(BaseActivation): Just do nothing for output both forward/backward. """ - def __init__(self): BaseActivation.__init__(self, 'linear', False) + def __init__(self): BaseActivation.__init__(self, '', False) LinearActivation = IdentityActivation From aa560dbb97d110086b9bdbd361fed8db95d93a89 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 11 Nov 2016 12:10:10 +0800 Subject: [PATCH 149/180] fix bug in sum_cost --- python/paddle/trainer_config_helpers/layers.py | 18 ++++++++++++------ .../tests/configs/test_cost_layers.py | 2 +- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 10737c90ccfa3..92e09b51eb652 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -4048,7 +4048,8 @@ def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None): .. code-block:: python - cost = cross_entropy(input, label) + cost = cross_entropy(input=input_layer, + label=label_layer) :param input: The first input layer. :type input: LayerOutput. @@ -4084,7 +4085,8 @@ def cross_entropy_with_selfnorm(input, label, name=None, coeff=1.0, .. code-block:: python - cost = cross_entropy_with_selfnorm(input, label) + cost = cross_entropy_with_selfnorm(input=input_layer, + label=label_layer) :param input: The first input layer. :type input: LayerOutput. @@ -4122,7 +4124,7 @@ def sum_cost(input, name=None, layer_attr=None): .. code-block:: python - cost = sum_cost(input) + cost = sum_cost(input=input_layer) :param input: The first input layer. :type input: LayerOutput. @@ -4133,6 +4135,7 @@ def sum_cost(input, name=None, layer_attr=None): :return: LayerOutput object. :rtype: LayerOutput. """ + assert isinstance(input, LayerOutput) Layer(name=name, type=LayerType.SUM_COST, inputs=[input.name], @@ -4141,7 +4144,8 @@ def sum_cost(input, name=None, layer_attr=None): return LayerOutput(name, LayerType.SUM_COST, - parents=[input]) + parents=[input], + size=1) @wrap_name_default() @@ -4152,7 +4156,8 @@ def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None): .. code-block:: python - cost = huber_cost(input, label) + cost = huber_cost(input=input_layer, + label=label_layer) :param input: The first input layer. :type input: LayerOutput. @@ -4188,7 +4193,8 @@ def multi_binary_label_cross_entropy(input, label, name=None, coeff=1.0, .. code-block:: python - cost = multi_binary_label_cross_entropy(input, label) + cost = multi_binary_label_cross_entropy(input=input_layer, + label=label_layer) :param input: The first input layer. 
:type input: LayerOutput diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py index f1b3365f84e3e..cfaf2da001106 100644 --- a/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py +++ b/python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py @@ -25,4 +25,4 @@ huber_cost(input=data_layer(name='huber_probs', size=1), label=data_layer(name='huber_label', size=1)), multi_binary_label_cross_entropy(input=probs, label=xe_label), - sum_cost(hidden)) + sum_cost(input=hidden)) From 62c5389bad0191c8574d7be58fc1564567b913ef Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 11 Nov 2016 10:50:24 +0000 Subject: [PATCH 150/180] fix test_layerHelpers unittest error --- python/paddle/trainer_config_helpers/layers.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index 82c57e7f90ad5..a0a367f2d50df 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -1827,7 +1827,6 @@ def img_pool_layer(input, pool_size, name=None, @layer_support() def spp_layer(input, name=None, num_channels=None, pool_type=None, pyramid_height=None, img_width=None, layer_attr=None): - pass """ Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. The details please refer to @@ -1864,7 +1863,7 @@ def spp_layer(input, name=None, num_channels=None, pool_type=None, if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)): type_name += '-projection' - Layer( + l = Layer( name=name, type=LayerType.SPP_LAYER, inputs=Input(input.name, @@ -1875,8 +1874,8 @@ def spp_layer(input, name=None, num_channels=None, pool_type=None, ), **ExtraLayerAttribute.to_kwargs(layer_attr) ) - return LayerOutput(name, LayerType.SPP_LAYER, parents=[input], - num_filters=num_channels) + return LayerOutput(name, layer_type=LayerType.SPP_LAYER, parents=[input], + num_filters=num_channels, size=l.config.size) def __img_norm_layer__(name, input, size, norm_type, scale, power, From b3bcc52f8f3f45bb459afcfdcd624a8817351b71 Mon Sep 17 00:00:00 2001 From: qijun Date: Fri, 11 Nov 2016 11:04:54 +0000 Subject: [PATCH 151/180] change python code style to pep8 --- .style.yapf | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.style.yapf b/.style.yapf index c9cdc2e790a40..4741fb4f3bbc6 100644 --- a/.style.yapf +++ b/.style.yapf @@ -1,5 +1,3 @@ [style] -based_on_style = google -indent_width = 4 -spaces_before_comment = 4 -split_before_logical_operator = true +based_on_style = pep8 +column_limit = 80 From 33b81648a37c282f6128548ae7eea47faf77b6d7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 11 Nov 2016 19:11:36 +0800 Subject: [PATCH 152/180] Fix bug in multple objects in define_py_sources --- python/paddle/trainer_config_helpers/data_sources.py | 2 +- .../tests/configs/generate_protostr.sh | 3 ++- .../tests/configs/test_split_datasource.py | 12 ++++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py index f51140656d0dc..283a45df30844 100644 --- a/python/paddle/trainer_config_helpers/data_sources.py +++ b/python/paddle/trainer_config_helpers/data_sources.py @@ -139,7 +139,7 @@ def 
__is_splitable__(o): test_obj = obj train_obj = obj if __is_splitable__(obj): - train_module, test_module = module + train_obj, test_obj = obj if args is None: args = "" diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index 9f614e3983ffa..cafc2142f25c7 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -11,7 +11,8 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -test_bilinear_interp test_maxout test_bi_grumemory math_ops) +test_bilinear_interp test_maxout test_bi_grumemory math_ops +test_spilit_datasource) for conf in ${configs[*]} diff --git a/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py new file mode 100644 index 0000000000000..c8dcb1bd8a47b --- /dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/test_split_datasource.py @@ -0,0 +1,12 @@ +from paddle.trainer_config_helpers import * + +define_py_data_sources2(train_list="train.list", + test_list="test.list", + module=["a", "b"], + obj=("c", "d")) +settings( + learning_rate=1e-3, + batch_size=1000 +) + +outputs(data_layer(name="a", size=10)) From 4607d517bfba481cbb02b2beb1dfa3773eadfded Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 11 Nov 2016 19:31:28 +0800 Subject: [PATCH 153/180] Add unittest for split datasource * Fix #436 --- .../tests/configs/generate_protostr.sh | 10 ++- .../protostr/test_split_datasource.protostr | 72 +++++++++++++++++++ python/paddle/utils/dump_config.py | 10 ++- 3 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 python/paddle/trainer_config_helpers/tests/configs/protostr/test_split_datasource.protostr diff --git a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh index e84e2a4b7f36a..bb594ac2c245d 100755 --- a/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh +++ b/python/paddle/trainer_config_helpers/tests/configs/generate_protostr.sh @@ -11,12 +11,18 @@ test_sequence_pooling test_lstmemory_layer test_grumemory_layer last_first_seq test_expand_layer test_ntm_layers test_hsigmoid img_layers img_trans_layers util_layers simple_rnn_layers unused_layers test_cost_layers test_rnn_group shared_fc shared_lstm test_cost_layers_with_weight -test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops -test_split_datasource) +test_spp_layer test_bilinear_interp test_maxout test_bi_grumemory math_ops) +whole_configs=(test_split_datasource) for conf in ${configs[*]} do echo "Generating " $conf python -m paddle.utils.dump_config $conf.py > $protostr/$conf.protostr.unitest done + +for conf in ${whole_configs[*]} +do + echo "Generating " $conf + python -m paddle.utils.dump_config $conf.py "" --whole > $protostr/$conf.protostr.unitest +done diff --git a/python/paddle/trainer_config_helpers/tests/configs/protostr/test_split_datasource.protostr b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_split_datasource.protostr new file mode 100644 index 0000000000000..1cfb92255aa92 --- 
/dev/null +++ b/python/paddle/trainer_config_helpers/tests/configs/protostr/test_split_datasource.protostr @@ -0,0 +1,72 @@ +model_config { + type: "nn" + layers { + name: "a" + type: "data" + size: 10 + active_type: "" + } + input_layer_names: "a" + output_layer_names: "a" + sub_models { + name: "root" + layer_names: "a" + input_layer_names: "a" + output_layer_names: "a" + is_recurrent_layer_group: false + } +} +data_config { + type: "py2" + files: "train.list" + async_load_data: true + for_test: false + load_data_module: "a" + load_data_object: "c" + load_data_args: "" + data_ratio: 1 + is_main_data: true + usage_ratio: 1.0 +} +opt_config { + batch_size: 1000 + algorithm: "sgd" + learning_rate: 0.001 + learning_rate_decay_a: 0.0 + learning_rate_decay_b: 0.0 + l1weight: 0.1 + l2weight: 0.0 + c1: 0.0001 + backoff: 0.5 + owlqn_steps: 10 + max_backoff: 5 + l2weight_zero_iter: 0 + average_window: 0 + learning_method: "momentum" + ada_epsilon: 1e-06 + do_average_in_cpu: false + ada_rou: 0.95 + learning_rate_schedule: "poly" + delta_add_rate: 1.0 + shrink_parameter_value: 0 + adam_beta1: 0.9 + adam_beta2: 0.999 + adam_epsilon: 1e-08 + learning_rate_args: "" + async_lagged_grad_discard_ratio: 1.5 +} +test_data_config { + type: "py2" + files: "test.list" + async_load_data: true + for_test: true + load_data_module: "b" + load_data_object: "d" + load_data_args: "" + data_ratio: 1 + is_main_data: true + usage_ratio: 1.0 +} +save_dir: "./output/model" +start_pass: 0 + diff --git a/python/paddle/utils/dump_config.py b/python/paddle/utils/dump_config.py index d8a2722575d53..c5ce5c8d9a084 100644 --- a/python/paddle/utils/dump_config.py +++ b/python/paddle/utils/dump_config.py @@ -19,13 +19,21 @@ __all__ = [] if __name__ == '__main__': + whole_conf = False if len(sys.argv) == 2: conf = parse_config(sys.argv[1], '') elif len(sys.argv) == 3: conf = parse_config(sys.argv[1], sys.argv[2]) + elif len(sys.argv) == 4: + conf = parse_config(sys.argv[1], sys.argv[2]) + if sys.argv[3] == '--whole': + whole_conf = True else: raise RuntimeError() assert isinstance(conf, TrainerConfig_pb2.TrainerConfig) - print conf.model_config + if whole_conf: + print conf + else: + print conf.model_config From 069d0004dc1334987e40d151b6e8521ebc80f661 Mon Sep 17 00:00:00 2001 From: Haonan Date: Fri, 4 Nov 2016 11:46:07 -0700 Subject: [PATCH 154/180] multi_binary_cross_entropy when ids vector is provided --- paddle/cuda/include/hl_matrix.h | 30 +++++++++ paddle/cuda/include/stub/hl_matrix_stub.h | 12 ++++ paddle/cuda/src/hl_cuda_matrix.cu | 78 +++++++++++++++++++++++ paddle/gserver/layers/CostLayer.cpp | 4 ++ paddle/gserver/tests/test_LayerGrad.cpp | 7 +- paddle/math/Matrix.cpp | 36 +++++++++++ paddle/math/Matrix.h | 4 ++ paddle/math/tests/test_matrixCompare.cpp | 66 ++++++++++++++++++- paddle/parameter/Argument.cpp | 22 +++++++ paddle/parameter/Argument.h | 8 +++ 10 files changed, 263 insertions(+), 4 deletions(-) diff --git a/paddle/cuda/include/hl_matrix.h b/paddle/cuda/include/hl_matrix.h index 71e8f8e3a60c9..6195e30b9974d 100644 --- a/paddle/cuda/include/hl_matrix.h +++ b/paddle/cuda/include/hl_matrix.h @@ -126,6 +126,36 @@ extern void hl_matrix_cross_entropy_bp(real* grad_d, int dimM, int dimN); +/** + * @brief Matrix multi-binary label cross entropy + * + * @param[in] output input matrix (M x N). + * @param[out] entropy output matrix (M x 1). + * @param[in] mat input sparse matrix. + * @param[in] dimM matrix height. + * @param[in] dimN matrix width. 
+ */ +extern void hl_matrix_multi_binary_cross_entropy(real* output, + real* entropy, + hl_sparse_matrix_s mat, + int dimM, + int dimN); + +/** + * @brief Matrix multi-binary label cross entropy backprop + * + * @param[in] output input matrix (M x N). + * @param[out] grad output matrix (M x N). + * @param[in] mat input sparse matrix. + * @param[in] dimM matrix height. + * @param[in] dimN matrix width. + */ +extern void hl_matrix_multi_binary_cross_entropy_bp(real* output, + real* grad, + hl_sparse_matrix_s mat, + int dimM, + int dimN); + /** * @brief Matrix zero memory. * diff --git a/paddle/cuda/include/stub/hl_matrix_stub.h b/paddle/cuda/include/stub/hl_matrix_stub.h index e37b1275432ca..76cac2e577693 100644 --- a/paddle/cuda/include/stub/hl_matrix_stub.h +++ b/paddle/cuda/include/stub/hl_matrix_stub.h @@ -57,6 +57,18 @@ inline void hl_matrix_cross_entropy_bp(real* grad_d, int dimM, int dimN) {} +inline void hl_matrix_multi_binary_cross_entropy(real* output, + real* entropy, + hl_sparse_matrix_s mat, + int dimM, + int dimN) {} + +inline void hl_matrix_multi_binary_cross_entropy_bp(real* output, + real* grad, + hl_sparse_matrix_s mat, + int dimM, + int dimN) {} + inline void hl_matrix_zero_mem(real* data, int num) {} inline void hl_param_relu_forward(real* output, diff --git a/paddle/cuda/src/hl_cuda_matrix.cu b/paddle/cuda/src/hl_cuda_matrix.cu index 3df9f63f9e4b7..001b62a6b94d6 100644 --- a/paddle/cuda/src/hl_cuda_matrix.cu +++ b/paddle/cuda/src/hl_cuda_matrix.cu @@ -18,6 +18,7 @@ limitations under the License. */ #include "hl_matrix_ops.cuh" #include "hl_matrix_apply.cuh" #include "hl_sequence.h" +#include "hl_sparse.ph" #include "paddle/utils/Logging.h" #include "hl_device_functions.cuh" #include "hl_gpu_matrix_kernel.cuh" @@ -317,6 +318,83 @@ void hl_matrix_classification_error(real* A_d, CHECK_SYNC("hl_matrix_classification_error"); } +__global__ void KeMatrixMultiBinaryCrossEntropy(real* output, + real* entropy, + int* row, + int* col, + int dimM, + int dimN) { + int index = blockIdx.x * blockDim.x + threadIdx.x; + if (index < dimM) { + for (int i = 0; i < dimN; i ++) { + entropy[index] -= log(1 - output[index * dimN + i]); + } + int *row_col = col + row[index]; + int col_num = row[index + 1] - row[index]; + for (int i = 0; i < col_num; i ++) { + real o = output[index * dimN + row_col[i]]; + entropy[index] -= log(o / (1 - o)); + } + } +} + +void hl_matrix_multi_binary_cross_entropy(real* output, + real* entropy, + hl_sparse_matrix_s csr_mat, + int dimM, + int dimN) { + CHECK_NOTNULL(output); + CHECK_NOTNULL(entropy); + CHECK_NOTNULL(csr_mat); + int n_threads = 1024; + int blocks = (dimM + n_threads - 1) / n_threads; + dim3 threads(n_threads); + dim3 grid(blocks); + hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix); + KeMatrixMultiBinaryCrossEntropy<<< grid, threads, 0, STREAM_DEFAULT >>> + (output, entropy, mat->csr_row, mat->csr_col, dimM, dimN); + CHECK_SYNC("hl_matrix_multi_binary_cross_entropy failed"); +} + +__global__ void KeMatrixMultiBinaryCrossEntropyBp(real* output, + real* grad, + int* row, + int* col, + int dimM, + int dimN) { + int row_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (row_idx < dimM) { + for (int i = 0; i < dimN; i ++) { + int index = row_idx * dimN + i; + grad[index] += 1.0 / (1 - output[index]); + } + int col_num = row[row_idx + 1] - row[row_idx]; + int *row_col = col + row[row_idx]; + for (int i = 0; i < col_num; i ++) { + int index = row_idx * dimN + row_col[i]; + grad[index] -= 1.0 / (output[index] * (1 - output[index])); + } + } +} + +void 
hl_matrix_multi_binary_cross_entropy_bp(real* output, + real* grad, + hl_sparse_matrix_s csr_mat, + int dimM, + int dimN) { + CHECK_NOTNULL(output); + CHECK_NOTNULL(grad); + CHECK_NOTNULL(csr_mat); + int n_threads = 1024; + int blocks = (dimM + n_threads - 1) / n_threads; + dim3 threads(n_threads); + dim3 grid(blocks); + hl_csr_matrix mat = (hl_csr_matrix)(csr_mat->matrix); + KeMatrixMultiBinaryCrossEntropyBp<<< grid, threads, 0, STREAM_DEFAULT >>> + (output, grad, mat->csr_row, mat->csr_col, dimM, dimN); + CHECK_SYNC("hl_matrix_multi_binary_cross_entropy_bp failed"); +} + __global__ void KeMatrixCrossEntropy(real* O, real* E, int* label, diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index 949788be49787..c86e562d0e445 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp @@ -462,6 +462,8 @@ bool MultiBinaryLabelCrossEntropy::init(const LayerMap& layerMap, void MultiBinaryLabelCrossEntropy::forwardImp(Matrix& output, Argument& label, Matrix& target) { + label.idsToSparseMatrix(output.getWidth(), useGpu_); + if (dynamic_cast(label.value.get()) || dynamic_cast(label.value.get())) { target.multiBinaryLabelCrossEntropy(output, *label.value); @@ -476,6 +478,8 @@ void MultiBinaryLabelCrossEntropy::forwardImp(Matrix& output, Argument& label, void MultiBinaryLabelCrossEntropy::backwardImp( Matrix& output, Argument& label, Matrix& outputG) { + label.idsToSparseMatrix(output.getWidth(), useGpu_); + if (dynamic_cast(label.value.get()) || dynamic_cast(label.value.get())) { outputG.multiBinaryLabelCrossEntropyBp(output, *label.value); diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/gserver/tests/test_LayerGrad.cpp index e7e07e9e69dc7..f19c14f56925a 100644 --- a/paddle/gserver/tests/test_LayerGrad.cpp +++ b/paddle/gserver/tests/test_LayerGrad.cpp @@ -538,9 +538,10 @@ TEST(Layer, multi_binary_label) { config.layerConfig.add_inputs(); config.layerConfig.add_inputs(); - // Not support GPU now - testLayerGrad(config, "multi_binary_label_cross_entropy", 100, - /* trans */ false, /* useGpu */ false); + for (auto useGpu : {false, true}) { + testLayerGrad(config, "multi_binary_label_cross_entropy", 100, + /* trans */ false, useGpu); + } } TEST(Layer, multi_cross_with_selfnorm) { diff --git a/paddle/math/Matrix.cpp b/paddle/math/Matrix.cpp index 950c3bb6cca28..9acc6005532fc 100644 --- a/paddle/math/Matrix.cpp +++ b/paddle/math/Matrix.cpp @@ -1268,6 +1268,42 @@ void GpuMatrix::bilinearBackward(const Matrix& out, } } +void GpuMatrix::multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label) { + GpuMatrix* output_ptr = dynamic_cast(&output); + auto label_ptr = dynamic_cast(&label); + + CHECK(output_ptr && label_ptr) << "Invalid argument pointer"; + CHECK(label_ptr->format_ == SPARSE_CSR) << "Matrix format not supported"; + CHECK(height_ == output_ptr->height_ && width_ == 1 + && output_ptr->width_ == label_ptr->getWidth() + && output_ptr->height_ == label_ptr->getHeight()) + << "Matrix dimensions are not equal"; + + real* output_d = output_ptr->data_; + real* entropy_d = data_; + hl_sparse_matrix_s mat_d = label_ptr->sMatrix_.get(); + hl_matrix_multi_binary_cross_entropy( + output_d, entropy_d, mat_d, height_, output_ptr->width_); +} + +void GpuMatrix::multiBinaryLabelCrossEntropyBp(Matrix &output, Matrix &label) { + GpuMatrix* output_ptr = dynamic_cast(&output); + auto label_ptr = dynamic_cast(&label); + + CHECK(output_ptr && label_ptr) << "Invalid argument pointer"; + CHECK(label_ptr->format_ == SPARSE_CSR) << "Matrix 
format not supported"; + CHECK(height_ == output_ptr->height_ && width_ == output_ptr->width_ + && output_ptr->width_ == label_ptr->getWidth() + && output_ptr->height_ == label_ptr->getHeight()) + << "Matrix dimensions are not equal"; + + real* output_d = output_ptr->data_; + real* grad_d = data_; + hl_sparse_matrix_s mat_d = label_ptr->sMatrix_.get(); + hl_matrix_multi_binary_cross_entropy_bp( + output_d, grad_d, mat_d, height_, width_); +} + /** * CpuMatrix */ diff --git a/paddle/math/Matrix.h b/paddle/math/Matrix.h index 700be7590240c..6c3c4804d2fc6 100644 --- a/paddle/math/Matrix.h +++ b/paddle/math/Matrix.h @@ -1303,6 +1303,10 @@ class GpuMatrix : public Matrix { const size_t numChannels, const real ratioH, const real ratioW); + + void multiBinaryLabelCrossEntropy(Matrix& output, Matrix& label); + + void multiBinaryLabelCrossEntropyBp(Matrix& output, Matrix& label); }; class CpuMatrix : public Matrix { diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/math/tests/test_matrixCompare.cpp index b3ee4bc34995a..a41e21903f560 100644 --- a/paddle/math/tests/test_matrixCompare.cpp +++ b/paddle/math/tests/test_matrixCompare.cpp @@ -2208,7 +2208,6 @@ void testCollectSharedBias(int numSamples, int dim, int channel) { MatrixCheckErr(*cpuBias, *check); } - TEST(Matrix, sharedBias) { for (auto numSamples : {1, 100, 520}) { for (auto dim : {100 * 16, 100 * 32}) { @@ -2222,6 +2221,71 @@ TEST(Matrix, sharedBias) { } } +void testMultiBinaryLabelCrossEntropy(int numSamples, int dim) { + MatrixPtr output = std::make_shared(numSamples, dim); + MatrixPtr cpuOutput = std::make_shared(numSamples, dim); + MatrixPtr gpuOutput = std::make_shared(numSamples, dim); + + MatrixPtr cpuEntropy = std::make_shared(numSamples, 1); + MatrixPtr gpuEntropy = std::make_shared(numSamples, 1); + + MatrixPtr cpuGrad = std::make_shared(numSamples, dim); + MatrixPtr gpuGrad = std::make_shared(numSamples, dim); + + auto cpuRows = IVector::create(numSamples + 1, false); + auto cpuCols = IVector::create(numSamples, false); + auto gpuRows = IVector::create(numSamples + 1, true); + auto gpuCols = IVector::create(numSamples, true); + cpuRows->setElement(0, 0); + gpuRows->setElement(0, 0); + for (int i = 0; i < numSamples; i ++) { + int id = rand() % dim; // NOLINT + cpuRows->setElement(i + 1, i + 1); + gpuRows->setElement(i + 1, i + 1); + cpuCols->setElement(i, id); + gpuCols->setElement(i, id); + } + + MatrixPtr cpuLabel = std::make_shared + (nullptr, cpuRows->getData(), cpuCols->getData(), + numSamples, dim, numSamples, NO_VALUE, SPARSE_CSR, false); + MatrixPtr gpuLabel = std::make_shared + (nullptr, gpuRows->getData(), gpuCols->getData(), + numSamples, dim, numSamples, NO_VALUE, SPARSE_CSR, false); + + output->randomizeUniform(); + cpuOutput->zeroMem(); + output->softmax(*cpuOutput); + gpuOutput->copyFrom(*cpuOutput); + + cpuEntropy->zeroMem(); + gpuEntropy->zeroMem(); + cpuEntropy->multiBinaryLabelCrossEntropy(*cpuOutput, *cpuLabel); + gpuEntropy->multiBinaryLabelCrossEntropy(*gpuOutput, *gpuLabel); + + MatrixPtr check1 = std::make_shared(numSamples, 1); + check1->copyFrom(*gpuEntropy); + MatrixCheckErr(*cpuEntropy, *check1); + + cpuGrad->zeroMem(); + gpuGrad->zeroMem(); + cpuGrad->multiBinaryLabelCrossEntropyBp(*cpuOutput, *cpuLabel); + gpuGrad->multiBinaryLabelCrossEntropyBp(*gpuOutput, *gpuLabel); + + MatrixPtr check2 = std::make_shared(numSamples, dim); + check2->copyFrom(*gpuGrad); + MatrixCheckErr(*cpuGrad, *check2); +} + +TEST(Matrix, multiBinaryCrossEntropy) { + for (auto numSamples : {1, 100, 500}) { + for 
(auto dim : {1000, 10000, 100000}) { + VLOG(3) << " numSamples=" << numSamples << " dim=" << dim; + testMultiBinaryLabelCrossEntropy(numSamples, dim); + } + } +} + int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); initMain(argc, argv); diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index 42c74661d2b2c..a5a96742e4cad 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -572,4 +572,26 @@ void Argument::subArgFrom(const Argument& input, size_t offset, size_t height, } } +void Argument::idsToSparseMatrix(int width, bool useGpu) { + if (ids) { + CHECK(!value); + int height = ids->getSize(); + int nnz = height; + auto rows = IVector::create(height + 1, useGpu); + auto cols = IVector::create(nnz, useGpu); + rows->setElement(0, 0); + for (int i = 0; i < height; i ++) { + int id = ids->getElement(i); + CHECK_LT(id, width); + rows->setElement(i + 1, i + 1); + cols->setElement(i, id); + } + value = Matrix::createSparseMatrix( + nullptr, rows->getData(), cols->getData(), + height, width, nnz, NO_VALUE, SPARSE_CSR, false, useGpu); + } else { + CHECK(value); + } +} + } // namespace paddle diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h index 81ff9029bc4c8..48e1551258fe7 100644 --- a/paddle/parameter/Argument.h +++ b/paddle/parameter/Argument.h @@ -286,6 +286,14 @@ struct Argument { sequence has sub-sequence degrades to a sequence. */ void degradeSequence(const Argument& input, bool useGpu); + + /* + @brief convert the ids vector to value as a sparse matrix + the ids vector keeps valid + @param the matrix width (id range) + @useGpu + */ + void idsToSparseMatrix(int width, bool useGpu); }; } // namespace paddle From 728defbec90a162ee5d7f8521106ded7797e72fa Mon Sep 17 00:00:00 2001 From: Haonan Date: Fri, 11 Nov 2016 16:31:56 -0800 Subject: [PATCH 155/180] copy the data when createSparseMatrix --- paddle/gserver/layers/CostLayer.cpp | 40 ++++++++++++++++------ paddle/parameter/Argument.cpp | 52 +++++++++++++++++++---------- paddle/parameter/Argument.h | 6 ++-- 3 files changed, 66 insertions(+), 32 deletions(-) diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/gserver/layers/CostLayer.cpp index c86e562d0e445..900981d1e7d36 100644 --- a/paddle/gserver/layers/CostLayer.cpp +++ b/paddle/gserver/layers/CostLayer.cpp @@ -462,29 +462,49 @@ bool MultiBinaryLabelCrossEntropy::init(const LayerMap& layerMap, void MultiBinaryLabelCrossEntropy::forwardImp(Matrix& output, Argument& label, Matrix& target) { - label.idsToSparseMatrix(output.getWidth(), useGpu_); + MatrixPtr value = nullptr; + if (label.ids) { + CHECK(!label.value); + value = Matrix::createSparseMatrix( + label.ids->getSize(), output.getWidth(), label.ids->getSize(), + NO_VALUE, SPARSE_CSR, false, useGpu_); + label.idsToSparseMatrix(value); + } else { + CHECK(label.value); + value = label.value; + } - if (dynamic_cast(label.value.get()) || - dynamic_cast(label.value.get())) { - target.multiBinaryLabelCrossEntropy(output, *label.value); + if (dynamic_cast(value.get()) || + dynamic_cast(value.get())) { + target.multiBinaryLabelCrossEntropy(output, *value); } else { Matrix::resizeOrCreate(targetPerDim_, output.getHeight(), output.getWidth(), false, useGpu_); - targetPerDim_->binaryLabelCrossEntropy(output, *label.value); + targetPerDim_->binaryLabelCrossEntropy(output, *value); targetPerDim_->rowSum(target); } } void MultiBinaryLabelCrossEntropy::backwardImp( Matrix& output, Argument& label, Matrix& outputG) { - 
label.idsToSparseMatrix(output.getWidth(), useGpu_); + MatrixPtr value = nullptr; + if (label.ids) { + CHECK(!value); + value = Matrix::createSparseMatrix( + label.ids->getSize(), output.getWidth(), label.ids->getSize(), + NO_VALUE, SPARSE_CSR, false, useGpu_); + label.idsToSparseMatrix(value); + } else { + CHECK(label.value); + value = label.value; + } - if (dynamic_cast(label.value.get()) || - dynamic_cast(label.value.get())) { - outputG.multiBinaryLabelCrossEntropyBp(output, *label.value); + if (dynamic_cast(value.get()) || + dynamic_cast(value.get())) { + outputG.multiBinaryLabelCrossEntropyBp(output, *value); } else { - outputG.binaryLabelCrossEntropyBp(output, *label.value); + outputG.binaryLabelCrossEntropyBp(output, *value); } } diff --git a/paddle/parameter/Argument.cpp b/paddle/parameter/Argument.cpp index a5a96742e4cad..354d0ead071b3 100644 --- a/paddle/parameter/Argument.cpp +++ b/paddle/parameter/Argument.cpp @@ -572,25 +572,41 @@ void Argument::subArgFrom(const Argument& input, size_t offset, size_t height, } } -void Argument::idsToSparseMatrix(int width, bool useGpu) { - if (ids) { - CHECK(!value); - int height = ids->getSize(); - int nnz = height; - auto rows = IVector::create(height + 1, useGpu); - auto cols = IVector::create(nnz, useGpu); - rows->setElement(0, 0); - for (int i = 0; i < height; i ++) { - int id = ids->getElement(i); - CHECK_LT(id, width); - rows->setElement(i + 1, i + 1); - cols->setElement(i, id); - } - value = Matrix::createSparseMatrix( - nullptr, rows->getData(), cols->getData(), - height, width, nnz, NO_VALUE, SPARSE_CSR, false, useGpu); +void Argument::idsToSparseMatrix(MatrixPtr sparse_mat) { + int height = ids->getSize(); + int width = sparse_mat->getWidth(); + + CpuIVector cpu_ids(height); + cpu_ids.copyFrom(*ids); + int *id_data = cpu_ids.getData(); + + int *rows = nullptr; + int *cols = nullptr; + if (sparse_mat->useGpu()) { + auto gpu_sparse_mat = + dynamic_cast(sparse_mat.get()); + rows = gpu_sparse_mat->rows_; + cols = gpu_sparse_mat->cols_; } else { - CHECK(value); + rows = sparse_mat->getRows(); + cols = sparse_mat->getCols(); + } + + rows[0] = 0; + for (int i = 0; i < height; i ++) { + int id = id_data[i]; + CHECK_LT(id, width); + rows[i + 1] = i + 1; + cols[i] = id; + } + + if (sparse_mat->useGpu()) { + auto gpu_sparse_mat = + dynamic_cast(sparse_mat.get()); + hl_memcpy_csr_matrix(gpu_sparse_mat->sMatrix_.get(), + nullptr, rows, cols, + HPPL_STREAM_DEFAULT); + hl_stream_synchronize(HPPL_STREAM_DEFAULT); } } diff --git a/paddle/parameter/Argument.h b/paddle/parameter/Argument.h index 48e1551258fe7..695033138b545 100644 --- a/paddle/parameter/Argument.h +++ b/paddle/parameter/Argument.h @@ -289,11 +289,9 @@ struct Argument { /* @brief convert the ids vector to value as a sparse matrix - the ids vector keeps valid - @param the matrix width (id range) - @useGpu + @param[out] the output sparse_mat (already allocated) */ - void idsToSparseMatrix(int width, bool useGpu); + void idsToSparseMatrix(MatrixPtr sparse_mat); }; } // namespace paddle From 319742c641d32d64113655c0b0129f10b7ca42b6 Mon Sep 17 00:00:00 2001 From: qijun Date: Sat, 12 Nov 2016 02:23:15 +0000 Subject: [PATCH 156/180] format python code in demo, doc, doc_cn and paddle directories --- .../data/process_cifar.py | 30 +- demo/image_classification/image_provider.py | 12 +- demo/image_classification/image_util.py | 76 +++-- demo/image_classification/prediction.py | 59 ++-- demo/image_classification/preprocess.py | 48 ++-- demo/image_classification/vgg_16_cifar.py | 50 ++-- 
demo/introduction/dataprovider.py | 6 +- demo/introduction/evaluate_model.py | 7 +- demo/introduction/trainer_config.py | 19 +- demo/mnist/data/generate_list.py | 6 +- demo/mnist/mnist_provider.py | 7 +- demo/mnist/vgg_16_mnist.py | 29 +- demo/model_zoo/embedding/extract_para.py | 45 ++- demo/model_zoo/embedding/paraconvert.py | 32 ++- demo/model_zoo/resnet/classify.py | 133 +++++---- demo/model_zoo/resnet/example/__init__.py | 1 - .../resnet/example/image_list_provider.py | 15 +- demo/model_zoo/resnet/load_feature.py | 12 +- demo/model_zoo/resnet/resnet.py | 265 +++++++++--------- demo/quick_start/api_train.py | 42 +-- demo/quick_start/dataprovider_bow.py | 10 +- demo/quick_start/dataprovider_emb.py | 6 +- demo/quick_start/preprocess.py | 4 +- demo/quick_start/trainer_config.bidi-lstm.py | 21 +- demo/quick_start/trainer_config.cnn.py | 14 +- demo/quick_start/trainer_config.db-lstm.py | 29 +- demo/quick_start/trainer_config.emb.py | 16 +- demo/quick_start/trainer_config.lr.py | 14 +- demo/quick_start/trainer_config.lstm.py | 22 +- demo/recommendation/common_utils.py | 7 +- demo/recommendation/data/config_generator.py | 17 +- demo/recommendation/data/meta_generator.py | 30 +- demo/recommendation/data/split.py | 1 - demo/recommendation/dataprovider.py | 2 + demo/recommendation/prediction.py | 10 +- demo/recommendation/trainer_config.py | 42 +-- demo/semantic_role_labeling/dataprovider.py | 6 +- demo/semantic_role_labeling/db_lstm.py | 3 +- demo/semantic_role_labeling/predict.py | 20 +- demo/sentiment/dataprovider.py | 9 +- demo/sentiment/predict.py | 63 +++-- demo/sentiment/preprocess.py | 93 +++--- demo/sentiment/sentiment_net.py | 48 ++-- demo/sentiment/trainer_config.py | 17 +- demo/seqToseq/dataprovider.py | 9 +- demo/seqToseq/preprocess.py | 69 +++-- demo/seqToseq/seqToseq_net.py | 106 +++---- demo/sequence_tagging/dataprovider.py | 106 +++---- demo/sequence_tagging/linear_crf.py | 42 ++- demo/sequence_tagging/rnn_crf.py | 80 +++--- doc/ui/predict/predict_sample.py | 173 +++++++----- doc_cn/concepts/trainer_config.py | 28 +- doc_cn/faq/word2vec_config.py | 14 +- doc_cn/faq/word2vec_dataprovider.py | 14 +- doc_cn/ui/data_provider/mnist_config.py | 9 +- .../ui/data_provider/mnist_provider.dict.py | 7 +- doc_cn/ui/data_provider/mnist_provider.py | 5 +- doc_cn/ui/data_provider/sentimental_config.py | 15 +- .../ui/data_provider/sentimental_provider.py | 3 +- paddle/api/__init__.py | 1 - paddle/api/paddle_ld_flags.py | 34 ++- paddle/api/test/testArguments.py | 2 +- paddle/api/test/testGradientMachine.py | 6 +- paddle/api/test/testMatrix.py | 3 +- paddle/api/test/testTrain.py | 3 +- paddle/api/test/testTrainConfig.py | 5 +- paddle/api/test/testTrainer.py | 6 +- paddle/api/test/testVector.py | 3 +- paddle/gserver/tests/__init__.py | 1 - paddle/gserver/tests/pyDataProvider.py | 97 ++++--- paddle/gserver/tests/rnn_data_provider.py | 59 ++-- paddle/gserver/tests/sequenceGen.py | 22 +- .../gserver/tests/sequence_layer_group.conf | 38 +-- .../tests/sequence_nest_layer_group.conf | 64 +++-- paddle/gserver/tests/test_PyDataProvider2.py | 38 +-- paddle/py_paddle/__init__.py | 11 +- paddle/py_paddle/dataprovider_converter.py | 29 +- paddle/py_paddle/util.py | 82 +++--- paddle/scripts/cluster_train/conf.py | 11 +- paddle/scripts/cluster_train/paddle.py | 63 +++-- paddle/trainer/tests/__init__.py | 1 - paddle/trainer/tests/config_parser_test.py | 2 +- paddle/trainer/tests/gen_proto_data.py | 127 ++++----- paddle/trainer/tests/testPyDataWrapper.py | 49 +++- paddle/utils/enable_virtualenv.py | 12 +- 85 
files changed, 1580 insertions(+), 1267 deletions(-) diff --git a/demo/image_classification/data/process_cifar.py b/demo/image_classification/data/process_cifar.py index b766118eb0073..b235010e4ece3 100644 --- a/demo/image_classification/data/process_cifar.py +++ b/demo/image_classification/data/process_cifar.py @@ -16,7 +16,6 @@ import sys import os import PIL.Image as Image - """ Usage: python process_cifar input_dir output_dir """ @@ -30,6 +29,7 @@ def mkdir_not_exist(path): if not os.path.exists(path): os.mkdir(path) + def create_dir_structure(output_dir): """ Create the directory structure for the directory. @@ -39,8 +39,8 @@ def create_dir_structure(output_dir): mkdir_not_exist(os.path.join(output_dir, "train")) mkdir_not_exist(os.path.join(output_dir, "test")) -def convert_batch(batch_path, label_set, label_map, - output_dir, data_split): + +def convert_batch(batch_path, label_set, label_map, output_dir, data_split): """ Convert CIFAR batch to the structure of Paddle format. batch_path: the batch to be converted. @@ -67,11 +67,23 @@ def convert_batch(batch_path, label_set, label_map, output_dir = sys.argv[2] num_batch = 5 create_dir_structure(output_dir) - label_map = {0: "airplane", 1: "automobile", 2: "bird", 3: "cat", 4: "deer", - 5: "dog", 6: "frog", 7: "horse", 8: "ship", 9: "truck"} + label_map = { + 0: "airplane", + 1: "automobile", + 2: "bird", + 3: "cat", + 4: "deer", + 5: "dog", + 6: "frog", + 7: "horse", + 8: "ship", + 9: "truck" + } labels = {} for i in range(1, num_batch + 1): - convert_batch(os.path.join(input_dir, "data_batch_%d" % i), labels, - label_map, output_dir, "train") - convert_batch(os.path.join(input_dir, "test_batch"), {}, - label_map, output_dir, "test") \ No newline at end of file + convert_batch( + os.path.join(input_dir, "data_batch_%d" % i), labels, label_map, + output_dir, "train") + convert_batch( + os.path.join(input_dir, "test_batch"), {}, label_map, output_dir, + "test") diff --git a/demo/image_classification/image_provider.py b/demo/image_classification/image_provider.py index 305efbcdc6bb1..28bf1bb02c1f0 100644 --- a/demo/image_classification/image_provider.py +++ b/demo/image_classification/image_provider.py @@ -46,14 +46,14 @@ def hook(settings, img_size, mean_img_size, num_classes, color, meta, use_jpeg, settings.img_mean = image_util.load_meta(settings.meta_path, settings.mean_img_size, - settings.img_size, - settings.color) + settings.img_size, settings.color) settings.logger.info('Image size: %s', settings.img_size) settings.logger.info('Meta path: %s', settings.meta_path) settings.input_types = [ dense_vector(settings.img_raw_size), # image feature - integer_value(settings.num_classes)] # labels + integer_value(settings.num_classes) + ] # labels settings.logger.info('DataProvider Initialization finished') @@ -79,8 +79,8 @@ def processData(settings, file_list): img = image_util.decode_jpeg(data['images'][i]) else: img = data['images'][i] - img_feat = image_util.preprocess_img(img, settings.img_mean, - settings.img_size, settings.is_train, - settings.color) + img_feat = image_util.preprocess_img( + img, settings.img_mean, settings.img_size, + settings.is_train, settings.color) label = data['labels'][i] yield img_feat.astype('float32'), int(label) diff --git a/demo/image_classification/image_util.py b/demo/image_classification/image_util.py index c545d16aafbc7..b5c6431c06f77 100644 --- a/demo/image_classification/image_util.py +++ b/demo/image_classification/image_util.py @@ -16,17 +16,20 @@ from PIL import Image from cStringIO import 
StringIO + def resize_image(img, target_size): """ Resize an image so that the shorter edge has length target_size. img: the input image to be resized. target_size: the target resized image size. """ - percent = (target_size/float(min(img.size[0], img.size[1]))) - resized_size = int(round(img.size[0] * percent)), int(round(img.size[1] * percent)) + percent = (target_size / float(min(img.size[0], img.size[1]))) + resized_size = int(round(img.size[0] * percent)), int( + round(img.size[1] * percent)) img = img.resize(resized_size, Image.ANTIALIAS) return img + def flip(im): """ Return the flipped image. @@ -38,6 +41,7 @@ def flip(im): else: return im[:, ::-1] + def crop_img(im, inner_size, color=True, test=True): """ Return cropped image. @@ -50,20 +54,22 @@ def crop_img(im, inner_size, color=True, test=True): If True, crop the center of images. """ if color: - height, width = max(inner_size, im.shape[1]), max(inner_size, im.shape[2]) + height, width = max(inner_size, im.shape[1]), max(inner_size, + im.shape[2]) padded_im = np.zeros((3, height, width)) startY = (height - im.shape[1]) / 2 startX = (width - im.shape[2]) / 2 endY, endX = startY + im.shape[1], startX + im.shape[2] - padded_im[:, startY: endY, startX: endX] = im + padded_im[:, startY:endY, startX:endX] = im else: im = im.astype('float32') - height, width = max(inner_size, im.shape[0]), max(inner_size, im.shape[1]) + height, width = max(inner_size, im.shape[0]), max(inner_size, + im.shape[1]) padded_im = np.zeros((height, width)) startY = (height - im.shape[0]) / 2 startX = (width - im.shape[1]) / 2 endY, endX = startY + im.shape[0], startX + im.shape[1] - padded_im[startY: endY, startX: endX] = im + padded_im[startY:endY, startX:endX] = im if test: startY = (height - inner_size) / 2 startX = (width - inner_size) / 2 @@ -72,19 +78,21 @@ def crop_img(im, inner_size, color=True, test=True): startX = np.random.randint(0, width - inner_size + 1) endY, endX = startY + inner_size, startX + inner_size if color: - pic = padded_im[:, startY: endY, startX: endX] + pic = padded_im[:, startY:endY, startX:endX] else: - pic = padded_im[startY: endY, startX: endX] + pic = padded_im[startY:endY, startX:endX] if (not test) and (np.random.randint(2) == 0): pic = flip(pic) return pic + def decode_jpeg(jpeg_string): np_array = np.array(Image.open(StringIO(jpeg_string))) if len(np_array.shape) == 3: np_array = np.transpose(np_array, (2, 0, 1)) return np_array + def preprocess_img(im, img_mean, crop_size, is_train, color=True): """ Does data augmentation for images. @@ -99,6 +107,7 @@ def preprocess_img(im, img_mean, crop_size, is_train, color=True): pic -= img_mean return pic.flatten() + def load_meta(meta_path, mean_img_size, crop_size, color=True): """ Return the loaded meta file. 
@@ -109,17 +118,18 @@ def load_meta(meta_path, mean_img_size, crop_size, color=True): mean = np.load(meta_path)['data_mean'] border = (mean_img_size - crop_size) / 2 if color: - assert(mean_img_size * mean_img_size * 3 == mean.shape[0]) + assert (mean_img_size * mean_img_size * 3 == mean.shape[0]) mean = mean.reshape(3, mean_img_size, mean_img_size) - mean = mean[:, border: border + crop_size, - border: border + crop_size].astype('float32') + mean = mean[:, border:border + crop_size, border:border + + crop_size].astype('float32') else: - assert(mean_img_size * mean_img_size == mean.shape[0]) + assert (mean_img_size * mean_img_size == mean.shape[0]) mean = mean.reshape(mean_img_size, mean_img_size) - mean = mean[border: border + crop_size, - border: border + crop_size].astype('float32') + mean = mean[border:border + crop_size, border:border + + crop_size].astype('float32') return mean + def load_image(img_path, is_color=True): """ Load image and return. @@ -130,6 +140,7 @@ def load_image(img_path, is_color=True): img.load() return img + def oversample(img, crop_dims): """ image : iterable of (H x W x K) ndarrays @@ -152,50 +163,53 @@ def oversample(img, crop_dims): for j in w_indices: crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) curr += 1 - crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ - -crop_dims / 2.0, - crop_dims / 2.0 - ]) + crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate( + [-crop_dims / 2.0, crop_dims / 2.0]) crops_ix = np.tile(crops_ix, (2, 1)) # Extract crops - crops = np.empty((10 * len(img), crop_dims[0], crop_dims[1], - im_shape[-1]), dtype=np.float32) + crops = np.empty( + (10 * len(img), crop_dims[0], crop_dims[1], im_shape[-1]), + dtype=np.float32) ix = 0 for im in img: for crop in crops_ix: crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] ix += 1 - crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors + crops[ix - 5:ix] = crops[ix - 5:ix, :, ::-1, :] # flip for mirrors return crops + class ImageTransformer: - def __init__(self, transpose = None, - channel_swap = None, mean = None, is_color = True): + def __init__(self, + transpose=None, + channel_swap=None, + mean=None, + is_color=True): self.transpose = transpose self.channel_swap = None self.mean = None - self.is_color = is_color + self.is_color = is_color - def set_transpose(self, order): + def set_transpose(self, order): if self.is_color: - assert 3 == len(order) + assert 3 == len(order) self.transpose = order - def set_channel_swap(self, order): + def set_channel_swap(self, order): if self.is_color: - assert 3 == len(order) + assert 3 == len(order) self.channel_swap = order def set_mean(self, mean): # mean value, may be one value per channel if mean.ndim == 1: - mean = mean[:, np.newaxis, np.newaxis] - else: + mean = mean[:, np.newaxis, np.newaxis] + else: # elementwise mean if self.is_color: assert len(mean.shape) == 3 - self.mean = mean + self.mean = mean def transformer(self, data): if self.transpose is not None: diff --git a/demo/image_classification/prediction.py b/demo/image_classification/prediction.py index 5d9e932658673..6a47bd5851c99 100755 --- a/demo/image_classification/prediction.py +++ b/demo/image_classification/prediction.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os,sys +import os, sys import numpy as np import logging from PIL import Image @@ -24,9 +24,11 @@ from paddle.trainer.PyDataProvider2 import dense_vector from paddle.trainer.config_parser import parse_config -logging.basicConfig(format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') +logging.basicConfig( + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') logging.getLogger().setLevel(logging.INFO) + class ImageClassifier(): def __init__(self, train_conf, @@ -58,18 +60,19 @@ def __init__(self, self.oversample = oversample self.is_color = is_color - self.transformer = image_util.ImageTransformer(is_color = is_color) - self.transformer.set_transpose((2,0,1)) + self.transformer = image_util.ImageTransformer(is_color=is_color) + self.transformer.set_transpose((2, 0, 1)) self.mean_file = mean_file mean = np.load(self.mean_file)['data_mean'] mean = mean.reshape(3, self.crop_dims[0], self.crop_dims[1]) - self.transformer.set_mean(mean) # mean pixel + self.transformer.set_mean(mean) # mean pixel gpu = 1 if use_gpu else 0 conf_args = "is_test=1,use_gpu=%d,is_predict=1" % (gpu) conf = parse_config(train_conf, conf_args) swig_paddle.initPaddle("--use_gpu=%d" % (gpu)) - self.network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) + self.network = swig_paddle.GradientMachine.createFromConfigProto( + conf.model_config) assert isinstance(self.network, swig_paddle.GradientMachine) self.network.loadParameters(self.model_dir) @@ -90,14 +93,14 @@ def get_data(self, img_path): # image_util.resize_image: short side is self.resize_dim image = image_util.resize_image(image, self.resize_dim) image = np.array(image) - input = np.zeros((1, image.shape[0], image.shape[1], 3), - dtype=np.float32) + input = np.zeros( + (1, image.shape[0], image.shape[1], 3), dtype=np.float32) input[0] = image.astype(np.float32) input = image_util.oversample(input, self.crop_dims) else: image = image.resize(self.crop_dims, Image.ANTIALIAS) - input = np.zeros((1, self.crop_dims[0], self.crop_dims[1], 3), - dtype=np.float32) + input = np.zeros( + (1, self.crop_dims[0], self.crop_dims[1], 3), dtype=np.float32) input[0] = np.array(image).astype(np.float32) data_in = [] @@ -133,22 +136,24 @@ def predict(self, image=None, output_layer=None): lab = np.argsort(-prob) logging.info("Label of %s is: %d", image, lab[0]) + if __name__ == '__main__': - image_size=32 - crop_size=32 - multi_crop=True - config="vgg_16_cifar.py" - output_layer="__fc_layer_1__" - mean_path="data/cifar-out/batches/batches.meta" - model_path=sys.argv[1] - image=sys.argv[2] - use_gpu=bool(int(sys.argv[3])) - - obj = ImageClassifier(train_conf=config, - model_dir=model_path, - resize_dim=image_size, - crop_dim=crop_size, - mean_file=mean_path, - use_gpu=use_gpu, - oversample=multi_crop) + image_size = 32 + crop_size = 32 + multi_crop = True + config = "vgg_16_cifar.py" + output_layer = "__fc_layer_1__" + mean_path = "data/cifar-out/batches/batches.meta" + model_path = sys.argv[1] + image = sys.argv[2] + use_gpu = bool(int(sys.argv[3])) + + obj = ImageClassifier( + train_conf=config, + model_dir=model_path, + resize_dim=image_size, + crop_dim=crop_size, + mean_file=mean_path, + use_gpu=use_gpu, + oversample=multi_crop) obj.predict(image, output_layer) diff --git a/demo/image_classification/preprocess.py b/demo/image_classification/preprocess.py index fe7ea19bf0277..10b9c1691b5e5 100755 --- a/demo/image_classification/preprocess.py +++ b/demo/image_classification/preprocess.py @@ -19,24 +19,36 @@ def 
option_parser(): parser = OptionParser(usage="usage: python preprcoess.py "\ "-i data_dir [options]") - parser.add_option("-i", "--input", action="store", - dest="input", help="Input data directory.") - parser.add_option("-s", "--size", action="store", - dest="size", help="Processed image size.") - parser.add_option("-c", "--color", action="store", - dest="color", help="whether to use color images.") + parser.add_option( + "-i", + "--input", + action="store", + dest="input", + help="Input data directory.") + parser.add_option( + "-s", + "--size", + action="store", + dest="size", + help="Processed image size.") + parser.add_option( + "-c", + "--color", + action="store", + dest="color", + help="whether to use color images.") return parser.parse_args() + if __name__ == '__main__': - options, args = option_parser() - data_dir = options.input - processed_image_size = int(options.size) - color = options.color == "1" - data_creator = ImageClassificationDatasetCreater(data_dir, - processed_image_size, - color) - data_creator.train_list_name = "train.txt" - data_creator.test_list_name = "test.txt" - data_creator.num_per_batch = 1000 - data_creator.overwrite = True - data_creator.create_batches() + options, args = option_parser() + data_dir = options.input + processed_image_size = int(options.size) + color = options.color == "1" + data_creator = ImageClassificationDatasetCreater( + data_dir, processed_image_size, color) + data_creator.train_list_name = "train.txt" + data_creator.test_list_name = "test.txt" + data_creator.num_per_batch = 1000 + data_creator.overwrite = True + data_creator.create_batches() diff --git a/demo/image_classification/vgg_16_cifar.py b/demo/image_classification/vgg_16_cifar.py index edd6988c48acd..58ceff5fc2f46 100755 --- a/demo/image_classification/vgg_16_cifar.py +++ b/demo/image_classification/vgg_16_cifar.py @@ -18,36 +18,38 @@ ####################Data Configuration ################## if not is_predict: - data_dir='data/cifar-out/batches/' - meta_path=data_dir+'batches.meta' - - args = {'meta':meta_path,'mean_img_size': 32, - 'img_size': 32,'num_classes': 10, - 'use_jpeg': 1,'color': "color"} - - define_py_data_sources2(train_list="train.list", - test_list="train.list", - module='image_provider', - obj='processData', - args=args) + data_dir = 'data/cifar-out/batches/' + meta_path = data_dir + 'batches.meta' + + args = { + 'meta': meta_path, + 'mean_img_size': 32, + 'img_size': 32, + 'num_classes': 10, + 'use_jpeg': 1, + 'color': "color" + } + + define_py_data_sources2( + train_list="train.list", + test_list="train.list", + module='image_provider', + obj='processData', + args=args) ######################Algorithm Configuration ############# settings( - batch_size = 128, - learning_rate = 0.1 / 128.0, - learning_method = MomentumOptimizer(0.9), - regularization = L2Regularization(0.0005 * 128) -) + batch_size=128, + learning_rate=0.1 / 128.0, + learning_method=MomentumOptimizer(0.9), + regularization=L2Regularization(0.0005 * 128)) #######################Network Configuration ############# -data_size=3*32*32 -label_size=10 -img = data_layer(name='image', - size=data_size) +data_size = 3 * 32 * 32 +label_size = 10 +img = data_layer(name='image', size=data_size) # small_vgg is predefined in trainer_config_helpers.networks -predict = small_vgg(input_image=img, - num_channels=3, - num_classes=label_size) +predict = small_vgg(input_image=img, num_channels=3, num_classes=label_size) if not is_predict: lbl = data_layer(name="label", size=label_size) diff --git 
a/demo/introduction/dataprovider.py b/demo/introduction/dataprovider.py index be8c0bc89156c..8515022e18dc6 100644 --- a/demo/introduction/dataprovider.py +++ b/demo/introduction/dataprovider.py @@ -15,10 +15,10 @@ from paddle.trainer.PyDataProvider2 import * import random + # define data types of input: 2 real numbers -@provider(input_types=[dense_vector(1), dense_vector(1)],use_seq=False) +@provider(input_types=[dense_vector(1), dense_vector(1)], use_seq=False) def process(settings, input_file): for i in xrange(2000): x = random.random() - yield [x], [2*x+0.3] - + yield [x], [2 * x + 0.3] diff --git a/demo/introduction/evaluate_model.py b/demo/introduction/evaluate_model.py index 8cfb843c42105..ca4a1872731ab 100755 --- a/demo/introduction/evaluate_model.py +++ b/demo/introduction/evaluate_model.py @@ -23,14 +23,17 @@ import numpy as np import os + def load(file_name): with open(file_name, 'rb') as f: - f.read(16) # skip header for float type. + f.read(16) # skip header for float type. return np.fromfile(f, dtype=np.float32) + def main(): print 'w=%.6f, b=%.6f from pass 29' % (load('output/pass-00029/w'), - load('output/pass-00029/b')) + load('output/pass-00029/b')) + if __name__ == '__main__': main() diff --git a/demo/introduction/trainer_config.py b/demo/introduction/trainer_config.py index 3e3df5583282a..7c838c1a8f5b3 100644 --- a/demo/introduction/trainer_config.py +++ b/demo/introduction/trainer_config.py @@ -16,9 +16,14 @@ # 1. read data. Suppose you saved above python code as dataprovider.py data_file = 'empty.list' -with open(data_file, 'w') as f: f.writelines(' ') -define_py_data_sources2(train_list=data_file, test_list=None, - module='dataprovider', obj='process',args={}) +with open(data_file, 'w') as f: + f.writelines(' ') +define_py_data_sources2( + train_list=data_file, + test_list=None, + module='dataprovider', + obj='process', + args={}) # 2. learning algorithm settings(batch_size=12, learning_rate=1e-3, learning_method=MomentumOptimizer()) @@ -26,7 +31,11 @@ # 3. Network configuration x = data_layer(name='x', size=1) y = data_layer(name='y', size=1) -y_predict = fc_layer(input=x, param_attr=ParamAttr(name='w'), size=1, act=LinearActivation(), bias_attr=ParamAttr(name='b')) +y_predict = fc_layer( + input=x, + param_attr=ParamAttr(name='w'), + size=1, + act=LinearActivation(), + bias_attr=ParamAttr(name='b')) cost = regression_cost(input=y_predict, label=y) outputs(cost) - diff --git a/demo/mnist/data/generate_list.py b/demo/mnist/data/generate_list.py index 1b929048b4d82..d880721f94c68 100644 --- a/demo/mnist/data/generate_list.py +++ b/demo/mnist/data/generate_list.py @@ -13,9 +13,9 @@ # limitations under the License. o = open("./" + "train.list", "w") -o.write("./data/raw_data/train" +"\n") +o.write("./data/raw_data/train" + "\n") o.close() o = open("./" + "test.list", "w") -o.write("./data/raw_data/t10k" +"\n") -o.close() \ No newline at end of file +o.write("./data/raw_data/t10k" + "\n") +o.close() diff --git a/demo/mnist/mnist_provider.py b/demo/mnist/mnist_provider.py index 32af29730a736..6df4676da3bdc 100644 --- a/demo/mnist/mnist_provider.py +++ b/demo/mnist/mnist_provider.py @@ -2,10 +2,9 @@ # Define a py data provider -@provider(input_types={ - 'pixel': dense_vector(28 * 28), - 'label': integer_value(10) -}) +@provider( + input_types={'pixel': dense_vector(28 * 28), + 'label': integer_value(10)}) def process(settings, filename): # settings is not used currently. 
imgf = filename + "-images-idx3-ubyte" labelf = filename + "-labels-idx1-ubyte" diff --git a/demo/mnist/vgg_16_mnist.py b/demo/mnist/vgg_16_mnist.py index 45a45bb061aa7..f9e89bc588aba 100644 --- a/demo/mnist/vgg_16_mnist.py +++ b/demo/mnist/vgg_16_mnist.py @@ -18,32 +18,29 @@ ####################Data Configuration ################## - if not is_predict: - data_dir='./data/' - define_py_data_sources2(train_list= data_dir + 'train.list', - test_list= data_dir + 'test.list', - module='mnist_provider', - obj='process') + data_dir = './data/' + define_py_data_sources2( + train_list=data_dir + 'train.list', + test_list=data_dir + 'test.list', + module='mnist_provider', + obj='process') ######################Algorithm Configuration ############# settings( - batch_size = 128, - learning_rate = 0.1 / 128.0, - learning_method = MomentumOptimizer(0.9), - regularization = L2Regularization(0.0005 * 128) -) + batch_size=128, + learning_rate=0.1 / 128.0, + learning_method=MomentumOptimizer(0.9), + regularization=L2Regularization(0.0005 * 128)) #######################Network Configuration ############# -data_size=1*28*28 -label_size=10 +data_size = 1 * 28 * 28 +label_size = 10 img = data_layer(name='pixel', size=data_size) # small_vgg is predined in trainer_config_helpers.network -predict = small_vgg(input_image=img, - num_channels=1, - num_classes=label_size) +predict = small_vgg(input_image=img, num_channels=1, num_classes=label_size) if not is_predict: lbl = data_layer(name="label", size=label_size) diff --git a/demo/model_zoo/embedding/extract_para.py b/demo/model_zoo/embedding/extract_para.py index 17067792fc38d..47e06fae9caa9 100755 --- a/demo/model_zoo/embedding/extract_para.py +++ b/demo/model_zoo/embedding/extract_para.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Example: python extract_para.py --preModel PREMODEL --preDict PREDICT \ @@ -29,6 +28,7 @@ from optparse import OptionParser import struct + def get_row_index(preDict, usrDict): """ Get the row positions for all words in user dictionary from pre-trained dictionary. 
@@ -47,7 +47,9 @@ def get_row_index(preDict, usrDict): pos.append(index[word]) return pos -def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, paraDim): + +def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, + paraDim): """ Extract desired parameters from a pretrained embedding model based on user dictionary """ @@ -70,6 +72,7 @@ def extract_parameters_by_usrDict(preModel, preDict, usrModel, usrDict, paraDim) print "extract parameters finish, total", len(rowIndex), "lines" fi.close() + def main(): """ Main entry for running paraconvert.py @@ -78,19 +81,33 @@ def main(): "python %prog --preModel PREMODEL --preDict PREDICT" \ " --usrModel USRMODEL --usrDict USRDICT -d DIM" parser = OptionParser(usage) - parser.add_option("--preModel", action="store", dest="preModel", - help="the name of pretrained embedding model") - parser.add_option("--preDict", action="store", dest="preDict", - help="the name of pretrained dictionary") - parser.add_option("--usrModel", action="store", dest="usrModel", - help="the name of output usr embedding model") - parser.add_option("--usrDict", action="store", dest="usrDict", - help="the name of user specified dictionary") - parser.add_option("-d", action="store", dest="dim", - help="dimension of parameter") + parser.add_option( + "--preModel", + action="store", + dest="preModel", + help="the name of pretrained embedding model") + parser.add_option( + "--preDict", + action="store", + dest="preDict", + help="the name of pretrained dictionary") + parser.add_option( + "--usrModel", + action="store", + dest="usrModel", + help="the name of output usr embedding model") + parser.add_option( + "--usrDict", + action="store", + dest="usrDict", + help="the name of user specified dictionary") + parser.add_option( + "-d", action="store", dest="dim", help="dimension of parameter") (options, args) = parser.parse_args() - extract_parameters_by_usrDict(options.preModel, options.preDict, - options.usrModel, options.usrDict, int(options.dim)) + extract_parameters_by_usrDict(options.preModel, options.preDict, + options.usrModel, options.usrDict, + int(options.dim)) + if __name__ == '__main__': main() diff --git a/demo/model_zoo/embedding/paraconvert.py b/demo/model_zoo/embedding/paraconvert.py index 523412303617a..54155eff8e26b 100755 --- a/demo/model_zoo/embedding/paraconvert.py +++ b/demo/model_zoo/embedding/paraconvert.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Example: python paraconvert.py --b2t -i INPUT -o OUTPUT -d DIM @@ -29,6 +28,7 @@ from optparse import OptionParser import struct + def binary2text(input, output, paraDim): """ Convert a binary parameter file of embedding model to be a text file. @@ -76,12 +76,13 @@ def binary2text(input, output, paraDim): fo.close() print "binary2text finish, total", line, "lines" + def get_para_count(input): """ Compute the total number of embedding parameters in input text file. input: the name of input text file """ - numRows = 1 + numRows = 1 paraDim = 0 with open(input) as f: line = f.readline() @@ -90,6 +91,7 @@ def get_para_count(input): numRows += 1 return numRows * paraDim + def text2binary(input, output, paddle_head=True): """ Convert a text parameter file of embedding model to be a binary file. 
@@ -123,6 +125,7 @@ def text2binary(input, output, paddle_head=True): fo.close() print "text2binary finish, total", count, "lines" + def main(): """ Main entry for running paraconvert.py @@ -131,21 +134,26 @@ def main(): "python %prog --b2t -i INPUT -o OUTPUT -d DIM \n" \ "python %prog --t2b -i INPUT -o OUTPUT" parser = OptionParser(usage) - parser.add_option("--b2t", action="store_true", - help="convert parameter file of embedding model from binary to text") - parser.add_option("--t2b", action="store_true", - help="convert parameter file of embedding model from text to binary") - parser.add_option("-i", action="store", dest="input", - help="input parameter file name") - parser.add_option("-o", action="store", dest="output", - help="output parameter file name") - parser.add_option("-d", action="store", dest="dim", - help="dimension of parameter") + parser.add_option( + "--b2t", + action="store_true", + help="convert parameter file of embedding model from binary to text") + parser.add_option( + "--t2b", + action="store_true", + help="convert parameter file of embedding model from text to binary") + parser.add_option( + "-i", action="store", dest="input", help="input parameter file name") + parser.add_option( + "-o", action="store", dest="output", help="output parameter file name") + parser.add_option( + "-d", action="store", dest="dim", help="dimension of parameter") (options, args) = parser.parse_args() if options.b2t: binary2text(options.input, options.output, options.dim) if options.t2b: text2binary(options.input, options.output) + if __name__ == '__main__': main() diff --git a/demo/model_zoo/resnet/classify.py b/demo/model_zoo/resnet/classify.py index 06d471722f805..7855126edcfec 100755 --- a/demo/model_zoo/resnet/classify.py +++ b/demo/model_zoo/resnet/classify.py @@ -26,16 +26,22 @@ from paddle.trainer.PyDataProvider2 import dense_vector from paddle.trainer.config_parser import parse_config -logging.basicConfig(format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') +logging.basicConfig( + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') logging.getLogger().setLevel(logging.INFO) + class ImageClassifier(): - def __init__(self, train_conf, model_dir=None, - resize_dim=256, crop_dim=224, + def __init__(self, + train_conf, + model_dir=None, + resize_dim=256, + crop_dim=224, use_gpu=True, mean_file=None, output_layer=None, - oversample=False, is_color=True): + oversample=False, + is_color=True): """ train_conf: network configure. model_dir: string, directory of model. @@ -62,24 +68,25 @@ def __init__(self, train_conf, model_dir=None, assert isinstance(self.output_layer, basestring) self.output_layer = self.output_layer.split(",") - self.transformer = image_util.ImageTransformer(is_color = is_color) - self.transformer.set_transpose((2,0,1)) - self.transformer.set_channel_swap((2,1,0)) + self.transformer = image_util.ImageTransformer(is_color=is_color) + self.transformer.set_transpose((2, 0, 1)) + self.transformer.set_channel_swap((2, 1, 0)) self.mean_file = mean_file if self.mean_file is not None: mean = np.load(self.mean_file)['data_mean'] mean = mean.reshape(3, self.crop_dims[0], self.crop_dims[1]) - self.transformer.set_mean(mean) # mean pixel + self.transformer.set_mean(mean) # mean pixel else: # if you use three mean value, set like: # this three mean value is calculated from ImageNet. 
- self.transformer.set_mean(np.array([103.939,116.779,123.68])) + self.transformer.set_mean(np.array([103.939, 116.779, 123.68])) conf_args = "is_test=1,use_gpu=%d,is_predict=1" % (int(use_gpu)) conf = parse_config(train_conf, conf_args) swig_paddle.initPaddle("--use_gpu=%d" % (int(use_gpu))) - self.network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) + self.network = swig_paddle.GradientMachine.createFromConfigProto( + conf.model_config) assert isinstance(self.network, swig_paddle.GradientMachine) self.network.loadParameters(self.model_dir) @@ -105,14 +112,14 @@ def get_data(self, img_path): # image_util.resize_image: short side is self.resize_dim image = image_util.resize_image(image, self.resize_dim) image = np.array(image) - input = np.zeros((1, image.shape[0], image.shape[1], 3), - dtype=np.float32) + input = np.zeros( + (1, image.shape[0], image.shape[1], 3), dtype=np.float32) input[0] = image.astype(np.float32) input = image_util.oversample(input, self.crop_dims) else: image = image.resize(self.crop_dims, Image.ANTIALIAS) - input = np.zeros((1, self.crop_dims[0], self.crop_dims[1], 3), - dtype=np.float32) + input = np.zeros( + (1, self.crop_dims[0], self.crop_dims[1], 3), dtype=np.float32) input[0] = np.array(image).astype(np.float32) data_in = [] @@ -172,7 +179,7 @@ def predict(self, data_file): logging.info("Label of %s is: %d", image, lab[0]) return results - def extract(self, data_file, output_dir, batch_size = 10000): + def extract(self, data_file, output_dir, batch_size=10000): """ extract and save features of output layers, which are specify in Outputs() in network configure. @@ -197,7 +204,7 @@ def extract(self, data_file, output_dir, batch_size = 10000): image_feature[file_name] = feature sample_num += 1 if sample_num == batch_size: - batch_name = os.path.join(output_dir, 'batch_%d' %(batch_num)) + batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) self.save_file(image_feature, batch_name) logging.info('Finish batch %d', batch_num) batch_num += 1 @@ -206,7 +213,7 @@ def extract(self, data_file, output_dir, batch_size = 10000): if idx % 1000 == 0: logging.info('%d/%d, %s', idx, len(image_files), file_name) if sample_num > 0: - batch_name = os.path.join(output_dir, 'batch_%d' %(batch_num)) + batch_name = os.path.join(output_dir, 'batch_%d' % (batch_num)) self.save_file(image_feature, batch_name) logging.info('Finish batch %d', batch_num) logging.info('Done: make image feature batch') @@ -215,38 +222,64 @@ def save_file(self, data, file): of = open(file, 'wb') cPickle.dump(data, of, protocol=cPickle.HIGHEST_PROTOCOL) + def option_parser(): """ Main entry for predciting """ usage = "%prog -c config -i data_list -w model_dir [options]" parser = OptionParser(usage="usage: %s" % usage) - parser.add_option("-j", "--job", - action="store", dest="job_type", - help="job type: predict, extract\ + parser.add_option( + "-j", + "--job", + action="store", + dest="job_type", + help="job type: predict, extract\ predict: predicting,\ extract: extract features") - parser.add_option("-c", "--conf", - action="store", dest="train_conf", - help="network config") - parser.add_option("-i", "--data", - action="store", dest="data_file", - help="image list") - parser.add_option("-w", "--model", - action="store", dest="model_path", - default=None, help="model path") - parser.add_option("-g", "--use_gpu", action="store", - dest="use_gpu", default=True, - help="Whether to use gpu mode.") - parser.add_option("-o", "--output_dir", - action="store", dest="output_dir", - 
default="output", help="output path") - parser.add_option("-m", "--mean", action="store", - dest="mean", default=None, - help="mean file.") - parser.add_option("-p", "--multi_crop", action="store_true", - dest="multi_crop", default=False, - help="Wether to use multiple crops on image.") + parser.add_option( + "-c", + "--conf", + action="store", + dest="train_conf", + help="network config") + parser.add_option( + "-i", "--data", action="store", dest="data_file", help="image list") + parser.add_option( + "-w", + "--model", + action="store", + dest="model_path", + default=None, + help="model path") + parser.add_option( + "-g", + "--use_gpu", + action="store", + dest="use_gpu", + default=True, + help="Whether to use gpu mode.") + parser.add_option( + "-o", + "--output_dir", + action="store", + dest="output_dir", + default="output", + help="output path") + parser.add_option( + "-m", + "--mean", + action="store", + dest="mean", + default=None, + help="mean file.") + parser.add_option( + "-p", + "--multi_crop", + action="store_true", + dest="multi_crop", + default=False, + help="Wether to use multiple crops on image.") parser.add_option("-l", "--output_layer", action="store", dest="output_layer", default=None, help="--job=extract, specify layers to extract "\ @@ -254,24 +287,26 @@ def option_parser(): "classification probability, output in resnet.py.") return parser.parse_args() + def main(): """ 1. parse input arguments. 2. predicting or extract features according job type. """ options, args = option_parser() - obj = ImageClassifier(options.train_conf, - options.model_path, - use_gpu=options.use_gpu, - mean_file=options.mean, - output_layer=options.output_layer, - oversample=options.multi_crop) + obj = ImageClassifier( + options.train_conf, + options.model_path, + use_gpu=options.use_gpu, + mean_file=options.mean, + output_layer=options.output_layer, + oversample=options.multi_crop) if options.job_type == "predict": obj.predict(options.data_file) elif options.job_type == "extract": - obj.extract(options.data_file, - options.output_dir) + obj.extract(options.data_file, options.output_dir) + if __name__ == '__main__': main() diff --git a/demo/model_zoo/resnet/example/__init__.py b/demo/model_zoo/resnet/example/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/demo/model_zoo/resnet/example/__init__.py +++ b/demo/model_zoo/resnet/example/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/demo/model_zoo/resnet/example/image_list_provider.py b/demo/model_zoo/resnet/example/image_list_provider.py index ee457e1fffc7e..9e415f76a5332 100644 --- a/demo/model_zoo/resnet/example/image_list_provider.py +++ b/demo/model_zoo/resnet/example/image_list_provider.py @@ -16,8 +16,7 @@ from paddle.trainer.PyDataProvider2 import * -def hook(settings, image_size, crop_size, color, file_list, - is_train, **kwargs): +def hook(settings, image_size, crop_size, color, file_list, is_train, **kwargs): """ Description: Init with a list of data file file_list is the name list of input files. 
@@ -58,7 +57,7 @@ def hook(settings, image_size, crop_size, color, file_list, sz = settings.crop_size * settings.crop_size settings.img_mean = np.zeros(sz * 3, dtype=np.single) for idx, value in enumerate(settings.mean_value): - settings.img_mean[idx * sz: (idx + 1) * sz] = value + settings.img_mean[idx * sz:(idx + 1) * sz] = value settings.img_mean = settings.img_mean.reshape(3, settings.crop_size, settings.crop_size) @@ -69,7 +68,8 @@ def hook(settings, image_size, crop_size, color, file_list, settings.input_types = [ dense_vector(settings.img_input_size), # image feature - integer_value(1)] # labels + integer_value(1) + ] # labels settings.logger.info('Image short side: %s', settings.img_size) settings.logger.info('Crop size: %s', settings.crop_size) @@ -97,9 +97,6 @@ def processData(settings, file_list): # swap channel if settings.is_swap_channel: img = img[settings.swap_channel, :, :] - img_feat = preprocess_img(img, - settings.img_mean, - settings.crop_size, - settings.is_train, - settings.color) + img_feat = preprocess_img(img, settings.img_mean, settings.crop_size, + settings.is_train, settings.color) yield img_feat.tolist(), int(lab.strip()) diff --git a/demo/model_zoo/resnet/load_feature.py b/demo/model_zoo/resnet/load_feature.py index ee4930b7a17f7..b0948b75fd0ac 100644 --- a/demo/model_zoo/resnet/load_feature.py +++ b/demo/model_zoo/resnet/load_feature.py @@ -17,9 +17,11 @@ import cPickle import logging -logging.basicConfig(format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') +logging.basicConfig( + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s') logging.getLogger().setLevel(logging.INFO) + def load_feature_c(file): """ Load feature extracted by C++ interface. @@ -30,14 +32,15 @@ def load_feature_c(file): f = open(file, 'r') for line in f: sample = [] - for slot in line.strip().split(";"): - fea = [float(val) for val in slot.strip().split()] + for slot in line.strip().split(";"): + fea = [float(val) for val in slot.strip().split()] if fea: sample.append(fea) features.append(sample) f.close() return features + def load_feature_py(feature_dir): """ Load feature extracted by python interface. @@ -54,6 +57,7 @@ def load_feature_py(feature_dir): logging.info('Load feature file %s', file_name) return features + if __name__ == '__main__': - print load_feature_py(sys.argv[1]) + print load_feature_py(sys.argv[1]) #print load_feature_c(sys.argv[1]) diff --git a/demo/model_zoo/resnet/resnet.py b/demo/model_zoo/resnet/resnet.py index 483e308ac804e..015b74cd48459 100644 --- a/demo/model_zoo/resnet/resnet.py +++ b/demo/model_zoo/resnet/resnet.py @@ -13,7 +13,6 @@ # limitations under the License. from paddle.trainer_config_helpers import * - """ paper: https://arxiv.org/abs/1512.03385 """ @@ -28,15 +27,19 @@ # mean.meta size : 3 x 224 x 224. 
# If you use three mean value, set like: # "mean_value:103.939,116.779,123.68;" - args={ + args = { 'mean_meta': "model/mean_meta_224/mean.meta", - 'image_size': 224, 'crop_size': 224, - 'color': True,'swap_channel:': [2, 1, 0]} - define_py_data_sources2(train_list, - 'example/test.list', - module="example.image_list_provider", - obj="processData", - args=args) + 'image_size': 224, + 'crop_size': 224, + 'color': True, + 'swap_channel:': [2, 1, 0] + } + define_py_data_sources2( + train_list, + 'example/test.list', + module="example.image_list_provider", + obj="processData", + args=args) batch_size = 1 learning_rate = 0.1 / batch_size @@ -54,12 +57,16 @@ learning_method='momentum', learning_rate_decay_a=0.5, learning_rate_decay_b=1200000 * 10, - learning_rate_schedule="discexp", -) + learning_rate_schedule="discexp", ) -def conv_bn_layer(name, input, filter_size, num_filters, - stride, padding, channels=None, +def conv_bn_layer(name, + input, + filter_size, + num_filters, + stride, + padding, + channels=None, active_type=ReluActivation()): """ A wrapper for conv layer with batch normalization layers. @@ -67,19 +74,18 @@ def conv_bn_layer(name, input, filter_size, num_filters, conv layer has no activation. """ - tmp = img_conv_layer(name=name + "_conv", - input=input, - filter_size=filter_size, - num_channels=channels, - num_filters=num_filters, - stride=stride, - padding=padding, - act=LinearActivation(), - bias_attr=False) - return batch_norm_layer(name=name + "_bn", - input=tmp, - act=active_type, - use_global_stats=is_test) + tmp = img_conv_layer( + name=name + "_conv", + input=input, + filter_size=filter_size, + num_channels=channels, + num_filters=num_filters, + stride=stride, + padding=padding, + act=LinearActivation(), + bias_attr=False) + return batch_norm_layer( + name=name + "_bn", input=tmp, act=active_type, use_global_stats=is_test) def bottleneck_block(name, input, num_filters1, num_filters2): @@ -88,29 +94,31 @@ def bottleneck_block(name, input, num_filters1, num_filters2): Last conv_bn_layer has no activation. Addto layer has activation of relu. """ - last_name = conv_bn_layer(name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=1, - padding=0) - last_name = conv_bn_layer(name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - last_name = conv_bn_layer(name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer(name=name + "_addto", - input=[input, last_name], - act=ReluActivation()) + last_name = conv_bn_layer( + name=name + '_branch2a', + input=input, + filter_size=1, + num_filters=num_filters1, + stride=1, + padding=0) + last_name = conv_bn_layer( + name=name + '_branch2b', + input=last_name, + filter_size=3, + num_filters=num_filters1, + stride=1, + padding=1) + last_name = conv_bn_layer( + name=name + '_branch2c', + input=last_name, + filter_size=1, + num_filters=num_filters2, + stride=1, + padding=0, + active_type=LinearActivation()) + + return addto_layer( + name=name + "_addto", input=[input, last_name], act=ReluActivation()) def mid_projection(name, input, num_filters1, num_filters2, stride=2): @@ -123,38 +131,41 @@ def mid_projection(name, input, num_filters1, num_filters2, stride=2): branch2x: bottleneck building block, shortcuts are identity. 
""" # stride = 2 - branch1 = conv_bn_layer(name=name + '_branch1', - input=input, - filter_size=1, - num_filters=num_filters2, - stride=stride, - padding=0, - active_type=LinearActivation()) - - last_name = conv_bn_layer(name=name + '_branch2a', - input=input, - filter_size=1, - num_filters=num_filters1, - stride=stride, - padding=0) - last_name = conv_bn_layer(name=name + '_branch2b', - input=last_name, - filter_size=3, - num_filters=num_filters1, - stride=1, - padding=1) - - last_name = conv_bn_layer(name=name + '_branch2c', - input=last_name, - filter_size=1, - num_filters=num_filters2, - stride=1, - padding=0, - active_type=LinearActivation()) - - return addto_layer(name=name + "_addto", - input=[branch1, last_name], - act=ReluActivation()) + branch1 = conv_bn_layer( + name=name + '_branch1', + input=input, + filter_size=1, + num_filters=num_filters2, + stride=stride, + padding=0, + active_type=LinearActivation()) + + last_name = conv_bn_layer( + name=name + '_branch2a', + input=input, + filter_size=1, + num_filters=num_filters1, + stride=stride, + padding=0) + last_name = conv_bn_layer( + name=name + '_branch2b', + input=last_name, + filter_size=3, + num_filters=num_filters1, + stride=1, + padding=1) + + last_name = conv_bn_layer( + name=name + '_branch2c', + input=last_name, + filter_size=1, + num_filters=num_filters2, + stride=1, + padding=0, + active_type=LinearActivation()) + + return addto_layer( + name=name + "_addto", input=[branch1, last_name], act=ReluActivation()) def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3): @@ -168,67 +179,67 @@ def deep_res_net(res2_num=3, res3_num=4, res4_num=6, res5_num=3): # For ImageNet # conv1: 112x112 img = data_layer(name='input', size=224 * 224 * 3) - tmp = conv_bn_layer("conv1", img, - filter_size=7, - channels=3, - num_filters=64, - stride=2, - padding=3) + tmp = conv_bn_layer( + "conv1", + img, + filter_size=7, + channels=3, + num_filters=64, + stride=2, + padding=3) tmp = img_pool_layer(name="pool1", input=tmp, pool_size=3, stride=2) # conv2_x: 56x56 - tmp = mid_projection(name="res2_1", - input=tmp, - num_filters1=64, - num_filters2=256, - stride=1) + tmp = mid_projection( + name="res2_1", input=tmp, num_filters1=64, num_filters2=256, stride=1) for i in xrange(2, res2_num + 1, 1): - tmp = bottleneck_block(name="res2_" + str(i), - input=tmp, - num_filters1=64, - num_filters2=256) + tmp = bottleneck_block( + name="res2_" + str(i), input=tmp, num_filters1=64, num_filters2=256) # conv3_x: 28x28 - tmp = mid_projection(name="res3_1", - input=tmp, - num_filters1=128, - num_filters2=512) + tmp = mid_projection( + name="res3_1", input=tmp, num_filters1=128, num_filters2=512) for i in xrange(2, res3_num + 1, 1): - tmp = bottleneck_block(name="res3_" + str(i), - input=tmp, num_filters1=128, - num_filters2=512) + tmp = bottleneck_block( + name="res3_" + str(i), + input=tmp, + num_filters1=128, + num_filters2=512) # conv4_x: 14x14 - tmp = mid_projection(name="res4_1", input=tmp, - num_filters1=256, num_filters2=1024) + tmp = mid_projection( + name="res4_1", input=tmp, num_filters1=256, num_filters2=1024) for i in xrange(2, res4_num + 1, 1): - tmp = bottleneck_block(name="res4_" + str(i), - input=tmp, - num_filters1=256, - num_filters2=1024) + tmp = bottleneck_block( + name="res4_" + str(i), + input=tmp, + num_filters1=256, + num_filters2=1024) # conv5_x: 7x7 - tmp = mid_projection(name="res5_1", input=tmp, - num_filters1=512, num_filters2=2048) + tmp = mid_projection( + name="res5_1", input=tmp, num_filters1=512, num_filters2=2048) for 
i in xrange(2, res5_num + 1, 1): - tmp = bottleneck_block(name="res5_" + str(i), - input=tmp, num_filters1=512, - num_filters2=2048) - - tmp = img_pool_layer(name='avgpool', - input=tmp, - pool_size=7, - stride=1, - pool_type=AvgPooling()) - - output = fc_layer(name='output', - input=tmp, - size=1000, - act=SoftmaxActivation()) + tmp = bottleneck_block( + name="res5_" + str(i), + input=tmp, + num_filters1=512, + num_filters2=2048) + + tmp = img_pool_layer( + name='avgpool', + input=tmp, + pool_size=7, + stride=1, + pool_type=AvgPooling()) + + output = fc_layer( + name='output', input=tmp, size=1000, act=SoftmaxActivation()) if not is_predict: - classification_cost(input=output, label=data_layer(name='label', - size=1)) + classification_cost( + input=output, label=data_layer( + name='label', size=1)) def res_net_50(): diff --git a/demo/quick_start/api_train.py b/demo/quick_start/api_train.py index 5ae19b8d26534..66cbb856484d2 100644 --- a/demo/quick_start/api_train.py +++ b/demo/quick_start/api_train.py @@ -22,27 +22,32 @@ from paddle.trainer.PyDataProvider2 \ import integer_value, integer_value_sequence, sparse_binary_vector + def parse_arguments(): parser = argparse.ArgumentParser() - parser.add_argument("--train_data", - type=str, required=False, help="train data file") + parser.add_argument( + "--train_data", type=str, required=False, help="train data file") parser.add_argument("--test_data", type=str, help="test data file") - parser.add_argument("--config", - type=str, required=True, help="config file name") + parser.add_argument( + "--config", type=str, required=True, help="config file name") parser.add_argument("--dict_file", required=True, help="dictionary file") - parser.add_argument("--seq", - default=1, type=int, - help="whether use sequence training") - parser.add_argument("--use_gpu", default=0, type=int, - help="whether use GPU for training") - parser.add_argument("--trainer_count", default=1, type=int, - help="Number of threads for training") - parser.add_argument("--num_passes", default=5, type=int, - help="Number of training passes") + parser.add_argument( + "--seq", default=1, type=int, help="whether use sequence training") + parser.add_argument( + "--use_gpu", default=0, type=int, help="whether use GPU for training") + parser.add_argument( + "--trainer_count", + default=1, + type=int, + help="Number of threads for training") + parser.add_argument( + "--num_passes", default=5, type=int, help="Number of training passes") return parser.parse_args() + UNK_IDX = 0 + def load_data(file_name, word_dict): with open(file_name, 'r') as f: for line in f: @@ -51,6 +56,7 @@ def load_data(file_name, word_dict): word_slot = [word_dict.get(w, UNK_IDX) for w in words] yield word_slot, int(label) + def load_dict(dict_file): word_dict = dict() with open(dict_file, 'r') as f: @@ -59,6 +65,7 @@ def load_dict(dict_file): word_dict[w] = i return word_dict + def main(): options = parse_arguments() api.initPaddle("--use_gpu=%s" % options.use_gpu, @@ -86,9 +93,9 @@ def main(): # create a data converter which converts data to PaddlePaddle # internal format input_types = [ - integer_value_sequence(len(word_dict)) if options.seq - else sparse_binary_vector(len(word_dict)), - integer_value(2)] + integer_value_sequence(len(word_dict)) if options.seq else + sparse_binary_vector(len(word_dict)), integer_value(2) + ] converter = DataProviderConverter(input_types) batch_size = trainer_config.opt_config.batch_size @@ -102,7 +109,7 @@ def main(): trainer.trainOneDataBatch(size, converter(batch)) 
trainer.finishTrainPass() if test_dataset: - trainer.startTestPeriod(); + trainer.startTestPeriod() for pos in xrange(0, len(test_dataset), batch_size): batch = itertools.islice(test_dataset, pos, pos + batch_size) size = min(batch_size, len(test_dataset) - pos) @@ -110,5 +117,6 @@ def main(): trainer.finishTestPeriod() trainer.finishTrain() + if __name__ == '__main__': main() diff --git a/demo/quick_start/dataprovider_bow.py b/demo/quick_start/dataprovider_bow.py index f8cde189cf87d..a5156a2d40cc0 100644 --- a/demo/quick_start/dataprovider_bow.py +++ b/demo/quick_start/dataprovider_bow.py @@ -17,6 +17,7 @@ # id of the word not in dictionary UNK_IDX = 0 + # initializer is called by the framework during initialization. # It allows the user to describe the data types and setup the # necessary data structure for later use. @@ -38,7 +39,9 @@ def initializer(settings, dictionary, **kwargs): # The second input is an integer. It represents the category id of the # sample. 2 means there are two labels in the dataset. # (1 for positive and 0 for negative) - integer_value(2)] + integer_value(2) + ] + # Delaring a data provider. It has an initializer 'data_initialzer'. # It will cache the generated data of the first pass in memory, so that @@ -69,9 +72,8 @@ def process(settings, file_name): def predict_initializer(settings, dictionary, **kwargs): settings.word_dict = dictionary - settings.input_types = [ - sparse_binary_vector(len(dictionary)) - ] + settings.input_types = [sparse_binary_vector(len(dictionary))] + # Declaring a data provider for prediction. The difference with process # is that label is not generated. diff --git a/demo/quick_start/dataprovider_emb.py b/demo/quick_start/dataprovider_emb.py index f5632d5f3f8bd..286f3f5c82081 100755 --- a/demo/quick_start/dataprovider_emb.py +++ b/demo/quick_start/dataprovider_emb.py @@ -24,7 +24,8 @@ def initializer(settings, dictionary, **kwargs): # The value of the integers range from 0 to len(dictrionary)-1 integer_value_sequence(len(dictionary)), # Define the second input for label id - integer_value(2)] + integer_value(2) + ] @provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) @@ -40,7 +41,8 @@ def process(settings, file_name): def predict_initializer(settings, dictionary, **kwargs): settings.word_dict = dictionary settings.input_types = [ - integer_value(len(dictionary), seq_type=SequenceType.SEQUENCE) + integer_value( + len(dictionary), seq_type=SequenceType.SEQUENCE) ] diff --git a/demo/quick_start/preprocess.py b/demo/quick_start/preprocess.py index 69fdbe44b5245..d87fad632a742 100755 --- a/demo/quick_start/preprocess.py +++ b/demo/quick_start/preprocess.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ 1. (remove HTML before or not)tokensizing 2. pos sample : rating score 5; neg sample: rating score 1-2. 
@@ -35,7 +34,8 @@ batch_size = 5000 word_count = {} -num_tokenize = max(1, multiprocessing.cpu_count() - 2) # parse + tokenize + save +num_tokenize = max(1, + multiprocessing.cpu_count() - 2) # parse + tokenize + save max_queue_size = 8 parse_queue = Queue(maxsize=max_queue_size + num_tokenize) tokenize_queue = Queue(maxsize=max_queue_size + num_tokenize) diff --git a/demo/quick_start/trainer_config.bidi-lstm.py b/demo/quick_start/trainer_config.bidi-lstm.py index 3be3d37342271..51deaf31f9468 100644 --- a/demo/quick_start/trainer_config.bidi-lstm.py +++ b/demo/quick_start/trainer_config.bidi-lstm.py @@ -27,11 +27,12 @@ trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -39,19 +40,17 @@ learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + gradient_clipping_threshold=25) -bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) +bias_attr = ParamAttr(initial_std=0., l2_rate=0.) data = data_layer(name="word", size=len(word_dict)) emb = embedding_layer(input=data, size=128) bi_lstm = bidirectional_lstm(input=emb, size=128) dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5) -output = fc_layer(input=dropout, size=2, - bias_attr=bias_attr, - act=SoftmaxActivation()) +output = fc_layer( + input=dropout, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(output) diff --git a/demo/quick_start/trainer_config.cnn.py b/demo/quick_start/trainer_config.cnn.py index 253ec0aee26cf..388efa75f903e 100644 --- a/demo/quick_start/trainer_config.cnn.py +++ b/demo/quick_start/trainer_config.cnn.py @@ -27,11 +27,12 @@ trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -39,8 +40,7 @@ learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + gradient_clipping_threshold=25) data = data_layer(name="word", size=len(word_dict)) embedding = embedding_layer(input=data, size=128) diff --git a/demo/quick_start/trainer_config.db-lstm.py b/demo/quick_start/trainer_config.db-lstm.py index b35bdf5a61b47..02bc898d881ef 100644 --- a/demo/quick_start/trainer_config.db-lstm.py +++ b/demo/quick_start/trainer_config.db-lstm.py @@ -27,11 +27,12 @@ trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + 
args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -39,10 +40,9 @@ learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + gradient_clipping_threshold=25) -bias_attr = ParamAttr(initial_std=0.,l2_rate=0.) +bias_attr = ParamAttr(initial_std=0., l2_rate=0.) data = data_layer(name="word", size=len(word_dict)) emb = embedding_layer(input=data, size=128) @@ -52,17 +52,18 @@ input_layers = [hidden_0, lstm_0] -for i in range(1,8): +for i in range(1, 8): fc = fc_layer(input=input_layers, size=128) - lstm = lstmemory(input=fc, layer_attr=ExtraAttr(drop_rate=0.1), - reverse=(i % 2) == 1,) + lstm = lstmemory( + input=fc, + layer_attr=ExtraAttr(drop_rate=0.1), + reverse=(i % 2) == 1, ) input_layers = [fc, lstm] lstm_last = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_last, size=2, - bias_attr=bias_attr, - act=SoftmaxActivation()) +output = fc_layer( + input=lstm_last, size=2, bias_attr=bias_attr, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(output) diff --git a/demo/quick_start/trainer_config.emb.py b/demo/quick_start/trainer_config.emb.py index 34dd7b96f2f14..8fd18a7aac704 100644 --- a/demo/quick_start/trainer_config.emb.py +++ b/demo/quick_start/trainer_config.emb.py @@ -27,18 +27,16 @@ trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( - batch_size=batch_size, - learning_rate=2e-3, - learning_method=AdamOptimizer() -) + batch_size=batch_size, learning_rate=2e-3, learning_method=AdamOptimizer()) data = data_layer(name="word", size=len(word_dict)) embedding = embedding_layer(input=data, size=128) diff --git a/demo/quick_start/trainer_config.lr.py b/demo/quick_start/trainer_config.lr.py index c6059947f30b3..b9c9441baac28 100644 --- a/demo/quick_start/trainer_config.lr.py +++ b/demo/quick_start/trainer_config.lr.py @@ -32,11 +32,12 @@ # We need to use different process for training and prediction. # For training, the input data includes both word IDs and labels. # For prediction, the input data only includs word Ids. -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_bow", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_bow", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -44,8 +45,7 @@ learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + gradient_clipping_threshold=25) # Define the data for text features. The size of the data layer is the number # of words in the dictionary. 
diff --git a/demo/quick_start/trainer_config.lstm.py b/demo/quick_start/trainer_config.lstm.py index b412a9cbd914d..8821e02d9bd4a 100644 --- a/demo/quick_start/trainer_config.lstm.py +++ b/demo/quick_start/trainer_config.lstm.py @@ -27,11 +27,12 @@ trn = 'data/train.list' if not is_predict else None tst = 'data/test.list' if not is_predict else 'data/pred.list' process = 'process' if not is_predict else 'process_predict' -define_py_data_sources2(train_list=trn, - test_list=tst, - module="dataprovider_emb", - obj=process, - args={"dictionary": word_dict}) +define_py_data_sources2( + train_list=trn, + test_list=tst, + module="dataprovider_emb", + obj=process, + args={"dictionary": word_dict}) batch_size = 128 if not is_predict else 1 settings( @@ -39,17 +40,14 @@ learning_rate=2e-3, learning_method=AdamOptimizer(), regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) - + gradient_clipping_threshold=25) data = data_layer(name="word", size=len(word_dict)) emb = embedding_layer(input=data, size=128) -lstm = simple_lstm(input=emb, size=128, - lstm_cell_attr=ExtraAttr(drop_rate=0.25)) +lstm = simple_lstm( + input=emb, size=128, lstm_cell_attr=ExtraAttr(drop_rate=0.25)) lstm_max = pooling_layer(input=lstm, pooling_type=MaxPooling()) -output = fc_layer(input=lstm_max, size=2, - act=SoftmaxActivation()) +output = fc_layer(input=lstm_max, size=2, act=SoftmaxActivation()) if is_predict: maxid = maxid_layer(output) outputs([maxid, output]) diff --git a/demo/recommendation/common_utils.py b/demo/recommendation/common_utils.py index a5f00b3ef9ca0..613e36b496e47 100755 --- a/demo/recommendation/common_utils.py +++ b/demo/recommendation/common_utils.py @@ -21,8 +21,9 @@ def meta_to_header(meta, name): yield integer_value(each_meta['max']) elif each_meta['type'] == 'embedding': is_seq = each_meta['seq'] == 'sequence' - yield integer_value(len(each_meta['dict']), - seq_type=SequenceType.SEQUENCE if is_seq - else SequenceType.NO_SEQUENCE) + yield integer_value( + len(each_meta['dict']), + seq_type=SequenceType.SEQUENCE + if is_seq else SequenceType.NO_SEQUENCE) elif each_meta['type'] == 'one_hot_dense': yield dense_vector(len(each_meta['dict'])) diff --git a/demo/recommendation/data/config_generator.py b/demo/recommendation/data/config_generator.py index 29f38082693ad..fa605458300f8 100644 --- a/demo/recommendation/data/config_generator.py +++ b/demo/recommendation/data/config_generator.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- """ config_generator.py @@ -29,10 +28,7 @@ import docopt import copy -DEFAULT_FILE = { - "type": "split", - "delimiter": "," -} +DEFAULT_FILE = {"type": "split", "delimiter": ","} DEFAULT_FIELD = { "id": { @@ -107,19 +103,16 @@ def main(filename, fmt): field = copy.deepcopy(DEFAULT_FIELD[field_key]) field['pos'] = pos fields.append(field) - obj[k] = { - "file": file_dict, - "fields": fields - } - meta = { - "meta": obj - } + obj[k] = {"file": file_dict, "fields": fields} + meta = {"meta": obj} # print meta if fmt == 'json': + def formatter(x): import json return json.dumps(x, indent=2) elif fmt == 'yaml': + def formatter(x): import yaml return yaml.safe_dump(x, default_flow_style=False) diff --git a/demo/recommendation/data/meta_generator.py b/demo/recommendation/data/meta_generator.py index 8d1a33d02aea1..593c863670d5e 100644 --- a/demo/recommendation/data/meta_generator.py +++ b/demo/recommendation/data/meta_generator.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Preprocess Movielens dataset, to get movie/user object. @@ -66,8 +65,8 @@ def scan(self, key): self.__key_set__.add(key) def finish_scan(self, compare=None, key=None, reverse=False): - self.__key_set__ = sorted(list(self.__key_set__), cmp=compare, - key=key, reverse=reverse) + self.__key_set__ = sorted( + list(self.__key_set__), cmp=compare, key=key, reverse=reverse) self.dict = dict() for idx, each_key in enumerate(self.__key_set__): self.dict[each_key] = idx @@ -207,11 +206,10 @@ def __init__(self, config): self.dict = EmbeddingFieldParser.CharBasedEmbeddingDict( self.seq_type == EmbeddingFieldParser.SEQUENCE) elif config['dict']['type'] == 'split': - self.dict = SplitEmbeddingDict( - config['dict'].get('delimiter', ',')) + self.dict = SplitEmbeddingDict(config['dict'].get('delimiter', ',')) elif config['dict']['type'] == 'whole_content': - self.dict = EmbeddingFieldParser.WholeContentDict( - config['dict']['sort']) + self.dict = EmbeddingFieldParser.WholeContentDict(config['dict'][ + 'sort']) else: print config assert False @@ -333,8 +331,8 @@ def create(config): return PositionContentExtractor(config['pos']) else: extra_args = config['regex'] - return RegexPositionContentExtractor(pos=config['pos'], - **extra_args) + return RegexPositionContentExtractor( + pos=config['pos'], **extra_args) class MetaFile(object): @@ -364,9 +362,10 @@ def parse(self, config): metas = map(lambda x: x.meta_field(), field_parsers) # print metas - key_index = filter(lambda x: x is not None, map( - lambda (idx, meta): idx if 'is_key' in meta and meta['is_key'] - else None, enumerate(metas)))[0] + key_index = filter( + lambda x: x is not None, + map(lambda (idx, meta): idx if 'is_key' in meta and meta['is_key'] else None, + enumerate(metas)))[0] key_map = [] for i in range(min(key_index, len(metas))): @@ -374,12 +373,7 @@ def parse(self, config): for i in range(key_index + 1, len(metas)): key_map.append(i) - obj = { - '__meta__': { - 'raw_meta': metas, - 'feature_map': key_map - } - } + obj = {'__meta__': {'raw_meta': metas, 'feature_map': key_map}} for each_block in reader.read(): idx = field_parsers[key_index].parse(each_block) diff --git a/demo/recommendation/data/split.py b/demo/recommendation/data/split.py index ff1f7fab7befd..8dd0cbd32af60 100644 --- a/demo/recommendation/data/split.py +++ b/demo/recommendation/data/split.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Separate movielens 1m dataset to train/test file. diff --git a/demo/recommendation/dataprovider.py b/demo/recommendation/dataprovider.py index 454467f40b44b..ff3932be03f1e 100755 --- a/demo/recommendation/dataprovider.py +++ b/demo/recommendation/dataprovider.py @@ -15,6 +15,7 @@ from paddle.trainer.PyDataProvider2 import * import common_utils # parse + def hook(settings, meta, **kwargs): """ Init hook is invoked before process data. It will set obj.slots and store @@ -41,6 +42,7 @@ def hook(settings, meta, **kwargs): settings.input_types = headers settings.meta = meta + @provider(init_hook=hook, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, filename): with open(filename, 'r') as f: diff --git a/demo/recommendation/prediction.py b/demo/recommendation/prediction.py index f8044a3195ec2..e2a202cfd1a47 100755 --- a/demo/recommendation/prediction.py +++ b/demo/recommendation/prediction.py @@ -28,7 +28,8 @@ model_path = sys.argv[1] swig_paddle.initPaddle('--use_gpu=0') conf = parse_config("trainer_config.py", "is_predict=1") - network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) + network = swig_paddle.GradientMachine.createFromConfigProto( + conf.model_config) assert isinstance(network, swig_paddle.GradientMachine) network.loadParameters(model_path) with open('./data/meta.bin', 'rb') as f: @@ -39,11 +40,12 @@ while True: movie_id = int(raw_input("Input movie_id: ")) user_id = int(raw_input("Input user_id: ")) - movie_meta = meta['movie'][movie_id] # Query Data From Meta. + movie_meta = meta['movie'][movie_id] # Query Data From Meta. user_meta = meta['user'][user_id] data = [movie_id - 1] data.extend(movie_meta) data.append(user_id - 1) data.extend(user_meta) - print "Prediction Score is %.2f" % ((network.forwardTest( - cvt.convert([data]))[0]['value'][0][0] + 5) / 2) + print "Prediction Score is %.2f" % ( + (network.forwardTest(cvt.convert([data]))[0]['value'][0][0] + 5) + / 2) diff --git a/demo/recommendation/trainer_config.py b/demo/recommendation/trainer_config.py index 624c22ec969dc..cec340b0b65a8 100755 --- a/demo/recommendation/trainer_config.py +++ b/demo/recommendation/trainer_config.py @@ -27,8 +27,8 @@ # load meta file meta = pickle.load(f) -settings(batch_size=1600, learning_rate=1e-3, - learning_method=RMSPropOptimizer()) +settings( + batch_size=1600, learning_rate=1e-3, learning_method=RMSPropOptimizer()) def construct_feature(name): @@ -59,11 +59,10 @@ def construct_feature(name): slot_name = each_meta.get('name', '%s_id' % name) if type_name == 'id': slot_dim = each_meta['max'] - embedding = embedding_layer(input=data_layer(slot_name, - size=slot_dim), - size=256) - fusion.append(fc_layer(input=embedding, - size=256)) + embedding = embedding_layer( + input=data_layer( + slot_name, size=slot_dim), size=256) + fusion.append(fc_layer(input=embedding, size=256)) elif type_name == 'embedding': is_seq = each_meta['seq'] == 'sequence' slot_dim = len(each_meta['dict']) @@ -71,17 +70,14 @@ def construct_feature(name): embedding = embedding_layer(input=din, size=256) if is_seq: fusion.append( - text_conv_pool(input=embedding, context_len=5, - hidden_size=256)) + text_conv_pool( + input=embedding, context_len=5, hidden_size=256)) else: - fusion.append(fc_layer(input=embedding, - size=256)) + fusion.append(fc_layer(input=embedding, size=256)) elif type_name == 'one_hot_dense': slot_dim = len(each_meta['dict']) - 
hidden = fc_layer(input=data_layer(slot_name, slot_dim), - size=256) - fusion.append(fc_layer(input=hidden, - size=256)) + hidden = fc_layer(input=data_layer(slot_name, slot_dim), size=256) + fusion.append(fc_layer(input=hidden, size=256)) return fc_layer(name="%s_fusion" % name, input=fusion, size=256) @@ -90,10 +86,16 @@ def construct_feature(name): user_feature = construct_feature("user") similarity = cos_sim(a=movie_feature, b=user_feature) if not is_predict: - outputs(regression_cost(input=similarity, - label=data_layer('rating', size=1))) - - define_py_data_sources2('data/train.list', 'data/test.list', module='dataprovider', - obj='process', args={'meta': meta}) + outputs( + regression_cost( + input=similarity, label=data_layer( + 'rating', size=1))) + + define_py_data_sources2( + 'data/train.list', + 'data/test.list', + module='dataprovider', + obj='process', + args={'meta': meta}) else: outputs(similarity) diff --git a/demo/semantic_role_labeling/dataprovider.py b/demo/semantic_role_labeling/dataprovider.py index 2ef25c42c1794..5c003584a52d4 100644 --- a/demo/semantic_role_labeling/dataprovider.py +++ b/demo/semantic_role_labeling/dataprovider.py @@ -26,9 +26,9 @@ def hook(settings, word_dict, label_dict, **kwargs): integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), integer_value_sequence(len(word_dict)), - integer_value_sequence(len(word_dict)), - integer_value_sequence(2), - integer_value_sequence(len(label_dict))] + integer_value_sequence(len(word_dict)), integer_value_sequence(2), + integer_value_sequence(len(label_dict)) + ] @provider(init_hook=hook) diff --git a/demo/semantic_role_labeling/db_lstm.py b/demo/semantic_role_labeling/db_lstm.py index 364460afbe31c..e3f6edad69721 100644 --- a/demo/semantic_role_labeling/db_lstm.py +++ b/demo/semantic_role_labeling/db_lstm.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import math import os import sys @@ -42,7 +41,7 @@ label_dict[w] = i if is_test: - train_list_file = None + train_list_file = None #define data provider define_py_data_sources2( diff --git a/demo/semantic_role_labeling/predict.py b/demo/semantic_role_labeling/predict.py index 9a27112828e44..f051d4175cf6f 100644 --- a/demo/semantic_role_labeling/predict.py +++ b/demo/semantic_role_labeling/predict.py @@ -41,22 +41,16 @@ def __init__(self, train_conf, dict_file, model_dir, label_file): len_dict = len(self.dict) len_label = len(self.labels) - conf = parse_config( - train_conf, - 'dict_len=' + str(len_dict) + - ',label_len=' + str(len_label) + - ',is_predict=True') + conf = parse_config(train_conf, 'dict_len=' + str(len_dict) + + ',label_len=' + str(len_label) + ',is_predict=True') self.network = swig_paddle.GradientMachine.createFromConfigProto( conf.model_config) self.network.loadParameters(model_dir) slots = [ - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(len_dict), - integer_value_sequence(2) + integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_dict), integer_value_sequence(len_dict), + integer_value_sequence(len_dict), integer_value_sequence(2) ] self.converter = DataProviderConverter(slots) @@ -110,8 +104,8 @@ def predict(self, data_file): len_sen = len(sen.split()) line_labels = lab[index:index + len_sen] index += len_sen - fout.write(sen + '\t' + ' '.join([self.labels_reverse[ - i] for i in line_labels]) + '\n') + fout.write(sen + '\t' + ' '.join( + [self.labels_reverse[i] for i in line_labels]) + '\n') def option_parser(): diff --git a/demo/sentiment/dataprovider.py b/demo/sentiment/dataprovider.py index 9a9fd81f030cb..53e3d1d20df92 100755 --- a/demo/sentiment/dataprovider.py +++ b/demo/sentiment/dataprovider.py @@ -17,8 +17,8 @@ def hook(settings, dictionary, **kwargs): settings.word_dict = dictionary settings.input_types = [ - integer_value_sequence(len(settings.word_dict)), - integer_value(2)] + integer_value_sequence(len(settings.word_dict)), integer_value(2) + ] settings.logger.info('dict len : %d' % (len(settings.word_dict))) @@ -29,6 +29,7 @@ def process(settings, file_name): label, comment = line.strip().split('\t\t') label = int(label) words = comment.split() - word_slot = [settings.word_dict[w] for w in words if w in - settings.word_dict] + word_slot = [ + settings.word_dict[w] for w in words if w in settings.word_dict + ] yield word_slot, label diff --git a/demo/sentiment/predict.py b/demo/sentiment/predict.py index 7d0baeabbba68..bc0f6f3126429 100755 --- a/demo/sentiment/predict.py +++ b/demo/sentiment/predict.py @@ -18,14 +18,14 @@ from py_paddle import swig_paddle, DataProviderConverter from paddle.trainer.PyDataProvider2 import integer_value_sequence from paddle.trainer.config_parser import parse_config - """ Usage: run following command to show help message. python predict.py -h """ + class SentimentPrediction(): - def __init__(self, train_conf, dict_file, model_dir=None, label_file = None): + def __init__(self, train_conf, dict_file, model_dir=None, label_file=None): """ train_conf: trainer configure. dict_file: word dictionary file name. 
@@ -44,7 +44,8 @@ def __init__(self, train_conf, dict_file, model_dir=None, label_file = None): self.load_label(label_file) conf = parse_config(train_conf, "is_predict=1") - self.network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) + self.network = swig_paddle.GradientMachine.createFromConfigProto( + conf.model_config) self.network.loadParameters(self.model_dir) input_types = [integer_value_sequence(self.dict_dim)] self.converter = DataProviderConverter(input_types) @@ -61,7 +62,7 @@ def load_label(self, label_file): """ Load label. """ - self.label={} + self.label = {} for v in open(label_file, 'r'): self.label[int(v.split('\t')[1])] = v.split('\t')[0] @@ -72,7 +73,9 @@ def get_data(self, data_file): with open(data_file, 'r') as fdata: for line in fdata: words = line.strip().split() - word_slot = [self.word_dict[w] for w in words if w in self.word_dict] + word_slot = [ + self.word_dict[w] for w in words if w in self.word_dict + ] if not word_slot: print "all words are not in dictionary: %s", line continue @@ -89,25 +92,48 @@ def predict(self, data_file): if self.label is None: print("%s: predicting label is %d" % (data_file, lab[0][0])) else: - print("%s: predicting label is %s" % (data_file, self.label[lab[0][0]])) + print("%s: predicting label is %s" % + (data_file, self.label[lab[0][0]])) + def option_parser(): usage = "python predict.py -n config -w model_dir -d dictionary -i input_file " parser = OptionParser(usage="usage: %s [options]" % usage) - parser.add_option("-n", "--tconf", action="store", - dest="train_conf", help="network config") - parser.add_option("-d", "--dict", action="store", - dest="dict_file",help="dictionary file") - parser.add_option("-b", "--label", action="store", - dest="label", default=None, - help="dictionary file") - parser.add_option("-i", "--data", action="store", - dest="data", help="data file to predict") - parser.add_option("-w", "--model", action="store", - dest="model_path", default=None, - help="model path") + parser.add_option( + "-n", + "--tconf", + action="store", + dest="train_conf", + help="network config") + parser.add_option( + "-d", + "--dict", + action="store", + dest="dict_file", + help="dictionary file") + parser.add_option( + "-b", + "--label", + action="store", + dest="label", + default=None, + help="dictionary file") + parser.add_option( + "-i", + "--data", + action="store", + dest="data", + help="data file to predict") + parser.add_option( + "-w", + "--model", + action="store", + dest="model_path", + default=None, + help="model path") return parser.parse_args() + def main(): options, args = option_parser() train_conf = options.train_conf @@ -119,5 +145,6 @@ def main(): predict = SentimentPrediction(train_conf, dict_file, model_path, label) predict.predict(data) + if __name__ == '__main__': main() diff --git a/demo/sentiment/preprocess.py b/demo/sentiment/preprocess.py index 49b53d500a1bf..7146e95d751c4 100755 --- a/demo/sentiment/preprocess.py +++ b/demo/sentiment/preprocess.py @@ -22,13 +22,13 @@ from optparse import OptionParser from paddle.utils.preprocess_util import * - """ Usage: run following command to show help message. python preprocess.py -h """ -def save_dict(dict, filename, is_reverse = True): + +def save_dict(dict, filename, is_reverse=True): """ Save dictionary into file. dict: input dictionary. 
@@ -39,9 +39,10 @@ def save_dict(dict, filename, is_reverse = True): f = open(filename, 'w') for k, v in sorted(dict.items(), key=operator.itemgetter(1),\ reverse=is_reverse): - f.write('%s\t%s\n'%(k, v)) + f.write('%s\t%s\n' % (k, v)) f.close() + def tokenize(sentences): """ Use tokenizer.perl to tokenize input sentences. @@ -58,6 +59,7 @@ def tokenize(sentences): toks = tok_text.split('\n')[:-1] return toks + def read_lines(path): """ path: String, file path. @@ -71,12 +73,17 @@ def read_lines(path): seqs.append(line) return seqs + class SentimentDataSetCreate(): """ A class to process data for sentiment analysis task. """ - def __init__(self, data_path, output_path, - use_okenizer = True, multi_lines = False): + + def __init__(self, + data_path, + output_path, + use_okenizer=True, + multi_lines=False): """ data_path: string, traing and testing dataset path output_path: string, output path, store processed dataset @@ -164,23 +171,17 @@ def create_dataset(self): # Preprocess train data. train_data, train_lab_set = self.data_list(self.train_dir) print "processing train set..." - file_lists = self.save_data(train_data, - "train", - self.batch_size, - True, - True) + file_lists = self.save_data(train_data, "train", self.batch_size, True, + True) save_list(file_lists, self.train_list) # If have test data path, preprocess test data. if os.path.exists(self.test_dir): test_data, test_lab_set = self.data_list(self.test_dir) - assert(train_lab_set == test_lab_set) + assert (train_lab_set == test_lab_set) print "processing test set..." - file_lists = self.save_data(test_data, - "test", - self.batch_size, - False, - self.dict_with_test) + file_lists = self.save_data(test_data, "test", self.batch_size, + False, self.dict_with_test) save_list(file_lists, self.test_list) # save labels set. 
@@ -191,7 +192,9 @@ def create_dataset(self): save_dict(self.word_count, self.dict_file, True) self.dict_size = len(self.word_count) - def save_data(self, data, prefix = "", + def save_data(self, + data, + prefix="", batch_size=50000, is_shuffle=False, build_dict=False): @@ -205,7 +208,8 @@ def save_data(self, data, prefix = "", return: list of batch names """ if is_shuffle and self.multi_lines: - return self.save_data_multi_lines(data, prefix, batch_size, build_dict) + return self.save_data_multi_lines(data, prefix, batch_size, + build_dict) if is_shuffle: random.shuffle(data) @@ -213,7 +217,7 @@ def save_data(self, data, prefix = "", batch_names = [] for i in range(num_batches): batch_name = join_path(self.output_path, - "%s_part_%03d" %(prefix, i)) + "%s_part_%03d" % (prefix, i)) begin = i * batch_size end = min((i + 1) * batch_size, len(data)) # read a batch of data @@ -246,7 +250,9 @@ def get_data_list(self, begin, end, data): data_list = tokenize(data_list) return label_list, data_list - def save_data_multi_lines(self, data, prefix = "", + def save_data_multi_lines(self, + data, + prefix="", batch_size=50000, build_dict=False): """ @@ -274,14 +280,14 @@ def save_data_multi_lines(self, data, prefix = "", self.create_dict(data_list) length = len(label_list) - perm_list = np.array([ i for i in xrange(length) ]) + perm_list = np.array([i for i in xrange(length)]) random.shuffle(perm_list) num_batches = int(math.ceil(length / float(batch_size))) batch_names = [] for i in range(num_batches): batch_name = join_path(self.output_path, - "%s_part_%03d" %(prefix, i)) + "%s_part_%03d" % (prefix, i)) begin = i * batch_size end = min((i + 1) * batch_size, length) sub_label = [label_list[perm_list[i]] for i in range(begin, end)] @@ -304,35 +310,50 @@ def save_file(self, label_list, data_list, filename): f.write('%s\t\t%s\n' % (lab, seq)) f.close() + def option_parser(): parser = OptionParser(usage="usage: python preprcoess.py "\ "-i data_dir [options]") - parser.add_option("-i", "--data", action="store", - dest="input", help="Input data directory.") - parser.add_option("-o", "--output", action="store", - dest="output", default=None, - help="Output directory.") - parser.add_option("-t", "--tokenizer", action="store", - dest="use_tokenizer", default=True, - help="Whether to use tokenizer.") + parser.add_option( + "-i", + "--data", + action="store", + dest="input", + help="Input data directory.") + parser.add_option( + "-o", + "--output", + action="store", + dest="output", + default=None, + help="Output directory.") + parser.add_option( + "-t", + "--tokenizer", + action="store", + dest="use_tokenizer", + default=True, + help="Whether to use tokenizer.") parser.add_option("-m", "--multi_lines", action="store", dest="multi_lines", default=False, help="If input text files have multi lines and they "\ "need to be shuffled, you should set -m True,") return parser.parse_args() + def main(): options, args = option_parser() - data_dir=options.input - output_dir=options.output - use_tokenizer=options.use_tokenizer - multi_lines=options.multi_lines + data_dir = options.input + output_dir = options.output + use_tokenizer = options.use_tokenizer + multi_lines = options.multi_lines if output_dir is None: outname = os.path.basename(options.input) output_dir = join_path(os.path.dirname(data_dir), 'pre-' + outname) - data_creator = SentimentDataSetCreate(data_dir, output_dir, - use_tokenizer, multi_lines) + data_creator = SentimentDataSetCreate(data_dir, output_dir, use_tokenizer, + multi_lines) 
data_creator.create_dataset() + if __name__ == '__main__': main() diff --git a/demo/sentiment/sentiment_net.py b/demo/sentiment/sentiment_net.py index 31e585edcaa11..ff6a3624a404c 100644 --- a/demo/sentiment/sentiment_net.py +++ b/demo/sentiment/sentiment_net.py @@ -47,10 +47,12 @@ def sentiment_data(data_dir=None, for i, line in enumerate(open(dict_file, 'r')): word_dict[line.split('\t')[0]] = i - define_py_data_sources2(train_list, test_list, - module="dataprovider", - obj="process", - args={'dictionary': word_dict}) + define_py_data_sources2( + train_list, + test_list, + module="dataprovider", + obj="process", + args={'dictionary': word_dict}) return dict_dim, class_dim @@ -64,8 +66,7 @@ def bidirectional_lstm_net(input_dim, emb = embedding_layer(input=data, size=emb_dim) bi_lstm = bidirectional_lstm(input=emb, size=lstm_dim) dropout = dropout_layer(input=bi_lstm, dropout_rate=0.5) - output = fc_layer(input=dropout, size=class_dim, - act=SoftmaxActivation()) + output = fc_layer(input=dropout, size=class_dim, act=SoftmaxActivation()) if not is_predict: lbl = data_layer("label", 1) @@ -109,27 +110,36 @@ def stacked_lstm_net(input_dim, data = data_layer("word", input_dim) emb = embedding_layer(input=data, size=emb_dim) - fc1 = fc_layer(input=emb, size=hid_dim, act=linear, - bias_attr=bias_attr) - lstm1 = lstmemory(input=fc1, act=relu, bias_attr=bias_attr, - layer_attr=layer_attr) + fc1 = fc_layer(input=emb, size=hid_dim, act=linear, bias_attr=bias_attr) + lstm1 = lstmemory( + input=fc1, act=relu, bias_attr=bias_attr, layer_attr=layer_attr) inputs = [fc1, lstm1] for i in range(2, stacked_num + 1): - fc = fc_layer(input=inputs, size=hid_dim, act=linear, - param_attr=para_attr, bias_attr=bias_attr) - lstm = lstmemory(input=fc, reverse=(i % 2) == 0, act=relu, - bias_attr=bias_attr, layer_attr=layer_attr) + fc = fc_layer( + input=inputs, + size=hid_dim, + act=linear, + param_attr=para_attr, + bias_attr=bias_attr) + lstm = lstmemory( + input=fc, + reverse=(i % 2) == 0, + act=relu, + bias_attr=bias_attr, + layer_attr=layer_attr) inputs = [fc, lstm] fc_last = pooling_layer(input=inputs[0], pooling_type=MaxPooling()) lstm_last = pooling_layer(input=inputs[1], pooling_type=MaxPooling()) - output = fc_layer(input=[fc_last, lstm_last], size=class_dim, - act=SoftmaxActivation(), - bias_attr=bias_attr, param_attr=para_attr) + output = fc_layer( + input=[fc_last, lstm_last], + size=class_dim, + act=SoftmaxActivation(), + bias_attr=bias_attr, + param_attr=para_attr) if is_predict: outputs(output) else: - outputs( - classification_cost(input=output, label=data_layer('label', 1))) + outputs(classification_cost(input=output, label=data_layer('label', 1))) diff --git a/demo/sentiment/trainer_config.py b/demo/sentiment/trainer_config.py index db24182a8d735..894070e7c97dc 100644 --- a/demo/sentiment/trainer_config.py +++ b/demo/sentiment/trainer_config.py @@ -20,20 +20,19 @@ # whether this config is used for prediction is_predict = get_config_arg('is_predict', bool, False) -data_dir = "./data/pre-imdb" +data_dir = "./data/pre-imdb" dict_dim, class_dim = sentiment_data(data_dir, is_test, is_predict) ################## Algorithm Config ##################### settings( - batch_size=128, - learning_rate=2e-3, - learning_method=AdamOptimizer(), - regularization=L2Regularization(8e-4), - gradient_clipping_threshold=25 -) + batch_size=128, + learning_rate=2e-3, + learning_method=AdamOptimizer(), + regularization=L2Regularization(8e-4), + gradient_clipping_threshold=25) #################### Network Config 
###################### -stacked_lstm_net(dict_dim, class_dim=class_dim, - stacked_num=3, is_predict=is_predict) +stacked_lstm_net( + dict_dim, class_dim=class_dim, stacked_num=3, is_predict=is_predict) # bidirectional_lstm_net(dict_dim, class_dim=class_dim, is_predict=is_predict) diff --git a/demo/seqToseq/dataprovider.py b/demo/seqToseq/dataprovider.py index df19db109ed22..c5da1b7685f47 100755 --- a/demo/seqToseq/dataprovider.py +++ b/demo/seqToseq/dataprovider.py @@ -30,14 +30,14 @@ def hook(settings, src_dict, trg_dict, file_list, **kwargs): if settings.job_mode: settings.trg_dict = trg_dict settings.slots = [ - integer_value_sequence(len(settings.src_dict)), - integer_value_sequence(len(settings.trg_dict)), + integer_value_sequence(len(settings.src_dict)), + integer_value_sequence(len(settings.trg_dict)), integer_value_sequence(len(settings.trg_dict)) ] settings.logger.info("trg dict len : %d" % (len(settings.trg_dict))) else: settings.slots = [ - integer_value_sequence(len(settings.src_dict)), + integer_value_sequence(len(settings.src_dict)), integer_value_sequence(len(open(file_list[0], "r").readlines())) ] @@ -62,8 +62,7 @@ def process(settings, file_name): if settings.job_mode: trg_seq = line_split[1] # one target sequence trg_words = trg_seq.split() - trg_ids = [settings.trg_dict.get(w, UNK_IDX) - for w in trg_words] + trg_ids = [settings.trg_dict.get(w, UNK_IDX) for w in trg_words] # remove sequence whose length > 80 in training mode if len(src_ids) > 80 or len(trg_ids) > 80: diff --git a/demo/seqToseq/preprocess.py b/demo/seqToseq/preprocess.py index 5efb17a664b9a..bd1c51b1514b7 100755 --- a/demo/seqToseq/preprocess.py +++ b/demo/seqToseq/preprocess.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Example: python preprocess.py -i INPUT [-d DICTSIZE] [-m] @@ -24,12 +23,13 @@ -m --mergeDict merge source and target dictionary """ import os -import sys +import sys import string from optparse import OptionParser from paddle.utils.preprocess_util import save_list, DatasetCreater + class SeqToSeqDatasetCreater(DatasetCreater): """ A class to process data for sequence to sequence application. @@ -75,7 +75,7 @@ def cat_file(self, dir_path, suffix, output_path, output): if not os.path.exists(output): os.system(cmd + '> ' + output) - def build_dict(self, file_path, dict_path, dict_size = -1): + def build_dict(self, file_path, dict_path, dict_size=-1): """ Create the dictionary for the file, Note that 1. 
Valid characters include all printable characters @@ -99,20 +99,23 @@ def build_dict(self, file_path, dict_path, dict_size = -1): for word in words: if word not in dictory: dictory[word] = 1 - else: + else: dictory[word] += 1 output = open(dict_path, "w+") output.write('\n\n\n') count = 3 - for key, value in sorted(dictory.items(), key = lambda d:d[1], reverse = True): + for key, value in sorted( + dictory.items(), key=lambda d: d[1], reverse=True): output.write(key + "\n") count += 1 if count == dict_size: break self.dict_size = count - - def create_dataset(self, dict_size = -1, mergeDict = False, - suffixes = ['.src', '.trg']): + + def create_dataset(self, + dict_size=-1, + mergeDict=False, + suffixes=['.src', '.trg']): """ Create seqToseq dataset """ @@ -135,13 +138,14 @@ def create_dataset(self, dict_size = -1, mergeDict = False, # checkout dataset should be parallel corpora suffix_len = len(suffixes[0]) for dataset in dataset_list: - file_list = os.listdir(dataset) - if len(file_list) % 2 == 1: - raise RuntimeError("dataset should be parallel corpora") - file_list.sort() - for i in range(0, len(file_list), 2): - if file_list[i][:-suffix_len] != file_list[i + 1][:-suffix_len]: - raise RuntimeError("source and target file name should be equal") + file_list = os.listdir(dataset) + if len(file_list) % 2 == 1: + raise RuntimeError("dataset should be parallel corpora") + file_list.sort() + for i in range(0, len(file_list), 2): + if file_list[i][:-suffix_len] != file_list[i + 1][:-suffix_len]: + raise RuntimeError( + "source and target file name should be equal") # cat all the files with the same suffix in dataset for suffix in suffixes: @@ -155,16 +159,18 @@ def create_dataset(self, dict_size = -1, mergeDict = False, list = ['train.list', 'test.list', 'gen.list'] for dataset in dataset_list: outname = os.path.basename(dataset) - self.concat_file(dataset, outname + suffixes[0], + self.concat_file(dataset, outname + suffixes[0], outname + suffixes[1], dir_list[id], outname) - save_list([os.path.join(dir_list[id], outname)], + save_list([os.path.join(dir_list[id], outname)], os.path.join(self.output_path, list[id])) id += 1 # build dictionary for train data dict = ['src.dict', 'trg.dict'] - dict_path = [os.path.join(self.output_path, dict[0]), - os.path.join(self.output_path, dict[1])] + dict_path = [ + os.path.join(self.output_path, dict[0]), + os.path.join(self.output_path, dict[1]) + ] if mergeDict: outname = os.path.join(train_dir, train_dataset.split('/')[-1]) print 'build src dictionary for train data' @@ -173,22 +179,30 @@ def create_dataset(self, dict_size = -1, mergeDict = False, os.system('cp ' + dict_path[0] + ' ' + dict_path[1]) else: outname = os.path.join(train_dataset, self.train_dir_name) - for id in range(0,2): + for id in range(0, 2): suffix = suffixes[id] print 'build ' + suffix[1:] + ' dictionary for train data' self.build_dict(outname + suffix, dict_path[id], dict_size) print 'dictionary size is', self.dict_size + def main(): usage = "usage: \n" \ "python %prog -i INPUT [-d DICTSIZE] [-m]" parser = OptionParser(usage) - parser.add_option("-i", action="store", dest="input", - help="input original dataset path") - parser.add_option("-d", action="store", dest="dictsize", - help="specified word count of dictionary") - parser.add_option("-m", "--mergeDict", action="store_true", dest="mergeDict", - help="merge source and target dictionary") + parser.add_option( + "-i", action="store", dest="input", help="input original dataset path") + parser.add_option( + "-d", + 
action="store", + dest="dictsize", + help="specified word count of dictionary") + parser.add_option( + "-m", + "--mergeDict", + action="store_true", + dest="mergeDict", + help="merge source and target dictionary") (options, args) = parser.parse_args() if options.input[-1] == os.path.sep: options.input = options.input[:-1] @@ -200,5 +214,6 @@ def main(): data_creator = SeqToSeqDatasetCreater(options.input, output_path) data_creator.create_dataset(dictsize, options.mergeDict) + if __name__ == "__main__": - main(); + main() diff --git a/demo/seqToseq/seqToseq_net.py b/demo/seqToseq/seqToseq_net.py index edd6ad3f739b6..ad5e3339c1461 100644 --- a/demo/seqToseq/seqToseq_net.py +++ b/demo/seqToseq/seqToseq_net.py @@ -50,16 +50,21 @@ def seq_to_seq_data(data_dir, trg_dict = None else: train_list = os.path.join(data_dir, train_list) - test_list = os.path.join(data_dir,test_list) + test_list = os.path.join(data_dir, test_list) - define_py_data_sources2(train_list, test_list, - module = "dataprovider", - obj = "process", - args = {"src_dict": src_dict, - "trg_dict": trg_dict}) + define_py_data_sources2( + train_list, + test_list, + module="dataprovider", + obj="process", + args={"src_dict": src_dict, + "trg_dict": trg_dict}) - return {"src_dict_path": src_lang_dict, "trg_dict_path": trg_lang_dict, - "gen_result": gen_result} + return { + "src_dict_path": src_lang_dict, + "trg_dict_path": trg_lang_dict, + "gen_result": gen_result + } def gru_encoder_decoder(data_conf, @@ -90,51 +95,55 @@ def gru_encoder_decoder(data_conf, size=word_vector_dim, param_attr=ParamAttr(name='_source_language_embedding')) src_forward = simple_gru(input=src_embedding, size=encoder_size) - src_backward = simple_gru(input=src_embedding, - size=encoder_size, - reverse=True) + src_backward = simple_gru( + input=src_embedding, size=encoder_size, reverse=True) encoded_vector = concat_layer(input=[src_forward, src_backward]) with mixed_layer(size=decoder_size) as encoded_proj: encoded_proj += full_matrix_projection(input=encoded_vector) backward_first = first_seq(input=src_backward) - with mixed_layer(size=decoder_size, - act=TanhActivation(), ) as decoder_boot: + with mixed_layer( + size=decoder_size, + act=TanhActivation(), ) as decoder_boot: decoder_boot += full_matrix_projection(input=backward_first) def gru_decoder_with_attention(enc_vec, enc_proj, current_word): - decoder_mem = memory(name='gru_decoder', - size=decoder_size, - boot_layer=decoder_boot) + decoder_mem = memory( + name='gru_decoder', size=decoder_size, boot_layer=decoder_boot) - context = simple_attention(encoded_sequence=enc_vec, - encoded_proj=enc_proj, - decoder_state=decoder_mem, ) + context = simple_attention( + encoded_sequence=enc_vec, + encoded_proj=enc_proj, + decoder_state=decoder_mem, ) with mixed_layer(size=decoder_size * 3) as decoder_inputs: decoder_inputs += full_matrix_projection(input=context) decoder_inputs += full_matrix_projection(input=current_word) - gru_step = gru_step_layer(name='gru_decoder', - input=decoder_inputs, - output_mem=decoder_mem, - size=decoder_size) + gru_step = gru_step_layer( + name='gru_decoder', + input=decoder_inputs, + output_mem=decoder_mem, + size=decoder_size) - with mixed_layer(size=target_dict_dim, - bias_attr=True, - act=SoftmaxActivation()) as out: + with mixed_layer( + size=target_dict_dim, bias_attr=True, + act=SoftmaxActivation()) as out: out += full_matrix_projection(input=gru_step) return out decoder_group_name = "decoder_group" - group_inputs=[StaticInput(input=encoded_vector,is_seq=True), - 
StaticInput(input=encoded_proj,is_seq=True)] + group_inputs = [ + StaticInput( + input=encoded_vector, is_seq=True), StaticInput( + input=encoded_proj, is_seq=True) + ] if not is_generating: trg_embedding = embedding_layer( - input=data_layer(name='target_language_word', - size=target_dict_dim), + input=data_layer( + name='target_language_word', size=target_dict_dim), size=word_vector_dim, param_attr=ParamAttr(name='_target_language_embedding')) group_inputs.append(trg_embedding) @@ -144,12 +153,12 @@ def gru_decoder_with_attention(enc_vec, enc_proj, current_word): # while encoded source sequence is accessed to as an unbounded memory. # Here, the StaticInput defines a read-only memory # for the recurrent_group. - decoder = recurrent_group(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs) + decoder = recurrent_group( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs) - lbl = data_layer(name='target_language_next_word', - size=target_dict_dim) + lbl = data_layer(name='target_language_next_word', size=target_dict_dim) cost = classification_cost(input=decoder, label=lbl) outputs(cost) else: @@ -168,16 +177,19 @@ def gru_decoder_with_attention(enc_vec, enc_proj, current_word): embedding_size=word_vector_dim) group_inputs.append(trg_embedding) - beam_gen = beam_search(name=decoder_group_name, - step=gru_decoder_with_attention, - input=group_inputs, - bos_id=0, - eos_id=1, - beam_size=beam_size, - max_length=max_length) - - seqtext_printer_evaluator(input=beam_gen, - id_input=data_layer(name="sent_id", size=1), - dict_file=trg_dict_path, - result_file=gen_trans_file) + beam_gen = beam_search( + name=decoder_group_name, + step=gru_decoder_with_attention, + input=group_inputs, + bos_id=0, + eos_id=1, + beam_size=beam_size, + max_length=max_length) + + seqtext_printer_evaluator( + input=beam_gen, + id_input=data_layer( + name="sent_id", size=1), + dict_file=trg_dict_path, + result_file=gen_trans_file) outputs(beam_gen) diff --git a/demo/sequence_tagging/dataprovider.py b/demo/sequence_tagging/dataprovider.py index 6f412d6834be6..37dcb7aa17c0a 100644 --- a/demo/sequence_tagging/dataprovider.py +++ b/demo/sequence_tagging/dataprovider.py @@ -17,8 +17,7 @@ import logging logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', -) + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) logger = logging.getLogger('paddle') logger.setLevel(logging.INFO) @@ -32,59 +31,58 @@ # [[-1,0], [0,0]] means previous token at column 0 and current token at # column 0 are combined as one feature. 
patterns = [ - [[-2,0]], - [[-1,0]], - [[0,0]], - [[1,0]], - [[2,0]], - - [[-1,0], [0,0]], - [[0,0], [1,0]], - - [[-2,1]], - [[-1,1]], - [[0,1]], - [[1,1]], - [[2,1]], - [[-2,1], [-1,1]], - [[-1,1], [0,1]], - [[0,1], [1,1]], - [[1,1], [2,1]], - - [[-2,1], [-1,1], [0,1]], - [[-1,1], [0,1], [1,1]], - [[0,1], [1,1], [2,1]], + [[-2, 0]], + [[-1, 0]], + [[0, 0]], + [[1, 0]], + [[2, 0]], + [[-1, 0], [0, 0]], + [[0, 0], [1, 0]], + [[-2, 1]], + [[-1, 1]], + [[0, 1]], + [[1, 1]], + [[2, 1]], + [[-2, 1], [-1, 1]], + [[-1, 1], [0, 1]], + [[0, 1], [1, 1]], + [[1, 1], [2, 1]], + [[-2, 1], [-1, 1], [0, 1]], + [[-1, 1], [0, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], ] dict_label = { - 'B-ADJP': 0, - 'I-ADJP': 1, - 'B-ADVP': 2, - 'I-ADVP': 3, - 'B-CONJP': 4, - 'I-CONJP': 5, - 'B-INTJ': 6, - 'I-INTJ': 7, - 'B-LST': 8, - 'I-LST': 9, - 'B-NP': 10, - 'I-NP': 11, - 'B-PP': 12, - 'I-PP': 13, - 'B-PRT': 14, - 'I-PRT': 15, - 'B-SBAR': 16, - 'I-SBAR': 17, - 'B-UCP': 18, - 'I-UCP': 19, - 'B-VP': 20, - 'I-VP': 21, - 'O': 22 + 'B-ADJP': 0, + 'I-ADJP': 1, + 'B-ADVP': 2, + 'I-ADVP': 3, + 'B-CONJP': 4, + 'I-CONJP': 5, + 'B-INTJ': 6, + 'I-INTJ': 7, + 'B-LST': 8, + 'I-LST': 9, + 'B-NP': 10, + 'I-NP': 11, + 'B-PP': 12, + 'I-PP': 13, + 'B-PRT': 14, + 'I-PRT': 15, + 'B-SBAR': 16, + 'I-SBAR': 17, + 'B-UCP': 18, + 'I-UCP': 19, + 'B-VP': 20, + 'I-VP': 21, + 'O': 22 } + def make_features(sequence): length = len(sequence) num_features = len(sequence[0]) + def get_features(pos): if pos < 0: return ['#B%s' % -pos] * num_features @@ -94,9 +92,10 @@ def get_features(pos): for i in xrange(length): for pattern in patterns: - fname = '/'.join([get_features(i+pos)[f] for pos, f in pattern]) + fname = '/'.join([get_features(i + pos)[f] for pos, f in pattern]) sequence[i].append(fname) + ''' Source file format: Each line is for one timestep. The features are separated by space. @@ -109,6 +108,8 @@ def get_features(pos): return a list of dict for each column ''' + + def create_dictionaries(filename, cutoff, oov_policy): def add_to_dict(sequence, dicts): num_features = len(dicts) @@ -140,7 +141,6 @@ def add_to_dict(sequence, dicts): features = line.split(' ') sequence.append(features) - for i in xrange(num_features): dct = dicts[i] n = 1 if oov_policy[i] == OOV_POLICY_USE else 0 @@ -151,7 +151,7 @@ def add_to_dict(sequence, dicts): else: dct[k] = n n += 1 - + if oov_policy[i] == OOV_POLICY_USE: # placeholder so that len(dct) will be the number of features # including OOV @@ -187,12 +187,15 @@ def initializer(settings, **xargs): logger.info("feature size=%s" % dim) settings.input_types = input_types + ''' if oov_policy[i] == OOV_POLICY_USE, features in i-th column which are not existed in dicts[i] will be assigned to id 0. if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist in dicts[i]. 
''' + + @provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM) def process(settings, filename): input_file = filename @@ -231,7 +234,7 @@ def gen_sample(sequence): logger.fatal("Unknown token: %s" % features[i]) else: vec.ids.append(dim + 0) - + dim += len(dicts[i]) sample[-1].append(vec) return sample @@ -255,4 +258,3 @@ def gen_sample(sequence): f.close() logger.info("num_sequences=%s" % num_sequences) - diff --git a/demo/sequence_tagging/linear_crf.py b/demo/sequence_tagging/linear_crf.py index 2bd1a20bc52fc..64895742e1b8c 100644 --- a/demo/sequence_tagging/linear_crf.py +++ b/demo/sequence_tagging/linear_crf.py @@ -16,11 +16,11 @@ import math -define_py_data_sources2(train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") - +define_py_data_sources2( + train_list="data/train.list", + test_list="data/test.list", + module="dataprovider", + obj="process") batch_size = 1 settings( @@ -30,14 +30,15 @@ average_window=0.5, learning_rate=1e-1, learning_rate_decay_a=1e-5, - learning_rate_decay_b=0.25, -) + learning_rate_decay_b=0.25, ) + +num_label_types = 23 -num_label_types=23 def get_simd_size(size): return int(math.ceil(float(size) / 8)) * 8 + # Currently, in order to use sparse_update=True, # the size has to be aligned. num_label_types = get_simd_size(num_label_types) @@ -45,40 +46,37 @@ def get_simd_size(size): features = data_layer(name="features", size=76328) word = data_layer(name="word", size=6778) pos = data_layer(name="pos", size=44) -chunk = data_layer(name="chunk", - size=num_label_types) +chunk = data_layer(name="chunk", size=num_label_types) crf_input = fc_layer( input=features, size=num_label_types, act=LinearActivation(), bias_attr=False, - param_attr=ParamAttr(initial_std=0, sparse_update=True)) + param_attr=ParamAttr( + initial_std=0, sparse_update=True)) -crf=crf_layer( +crf = crf_layer( input=crf_input, label=chunk, - param_attr=ParamAttr(name="crfw", initial_std=0), -) + param_attr=ParamAttr( + name="crfw", initial_std=0), ) -crf_decoding=crf_decoding_layer( +crf_decoding = crf_decoding_layer( size=num_label_types, input=crf_input, label=chunk, - param_attr=ParamAttr(name="crfw"), -) + param_attr=ParamAttr(name="crfw"), ) sum_evaluator( name="error", - input=crf_decoding, -) + input=crf_decoding, ) chunk_evaluator( name="chunk_f1", - input =[crf_decoding, chunk], + input=[crf_decoding, chunk], chunk_scheme="IOB", - num_chunk_types=11, -) + num_chunk_types=11, ) inputs(word, pos, chunk, features) outputs(crf) diff --git a/demo/sequence_tagging/rnn_crf.py b/demo/sequence_tagging/rnn_crf.py index fb157bf3ea719..90d4bbdddfdb4 100644 --- a/demo/sequence_tagging/rnn_crf.py +++ b/demo/sequence_tagging/rnn_crf.py @@ -16,10 +16,11 @@ import math -define_py_data_sources2(train_list="data/train.list", - test_list="data/test.list", - module="dataprovider", - obj="process") +define_py_data_sources2( + train_list="data/train.list", + test_list="data/test.list", + module="dataprovider", + obj="process") batch_size = 16 settings( @@ -27,29 +28,27 @@ batch_size=batch_size, regularization=L2Regularization(batch_size * 1e-5), average_window=0.5, - learning_rate = 2e-3, - learning_rate_decay_a = 5e-7, - learning_rate_decay_b = 0.5, -) + learning_rate=2e-3, + learning_rate_decay_a=5e-7, + learning_rate_decay_b=0.5, ) -word_dim=128 +word_dim = 128 hidden_dim = 128 with_rnn = True -initial_std=1/math.sqrt(hidden_dim) -param_attr=ParamAttr(initial_std=initial_std) -cpu_layer_attr=ExtraLayerAttribute(device=-1) +initial_std = 1 / 
math.sqrt(hidden_dim) +param_attr = ParamAttr(initial_std=initial_std) +cpu_layer_attr = ExtraLayerAttribute(device=-1) default_device(0) -num_label_types=23 +num_label_types = 23 features = data_layer(name="features", size=76328) word = data_layer(name="word", size=6778) pos = data_layer(name="pos", size=44) -chunk = data_layer(name="chunk", - size=num_label_types, - layer_attr=cpu_layer_attr) +chunk = data_layer( + name="chunk", size=num_label_types, layer_attr=cpu_layer_attr) emb = embedding_layer( input=word, size=word_dim, param_attr=ParamAttr(initial_std=0)) @@ -58,73 +57,64 @@ size=hidden_dim, act=STanhActivation(), bias_attr=True, - input=[full_matrix_projection(emb), - table_projection(pos, param_attr=param_attr)] -) + input=[ + full_matrix_projection(emb), table_projection( + pos, param_attr=param_attr) + ]) if with_rnn: rnn1 = recurrent_layer( act=ReluActivation(), bias_attr=True, input=hidden1, - param_attr=ParamAttr(initial_std=0), - ) + param_attr=ParamAttr(initial_std=0), ) hidden2 = mixed_layer( size=hidden_dim, act=STanhActivation(), bias_attr=True, - input=[full_matrix_projection(hidden1) - ] + ([ - full_matrix_projection(rnn1, param_attr=ParamAttr(initial_std=0)) - ] if with_rnn else []), -) + input=[full_matrix_projection(hidden1)] + + ([full_matrix_projection( + rnn1, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) if with_rnn: - rnn2=recurrent_layer( + rnn2 = recurrent_layer( reverse=True, act=ReluActivation(), bias_attr=True, input=hidden2, - param_attr=ParamAttr(initial_std=0), - ) + param_attr=ParamAttr(initial_std=0), ) crf_input = mixed_layer( size=num_label_types, bias_attr=False, - input=[ - full_matrix_projection(hidden2), - ] + ([ - full_matrix_projection(rnn2, param_attr=ParamAttr(initial_std=0)) - ] if with_rnn else []), -) + input=[full_matrix_projection(hidden2), ] + + ([full_matrix_projection( + rnn2, param_attr=ParamAttr(initial_std=0))] if with_rnn else []), ) crf = crf_layer( input=crf_input, label=chunk, - param_attr=ParamAttr(name="crfw", initial_std=0), - layer_attr=cpu_layer_attr, -) + param_attr=ParamAttr( + name="crfw", initial_std=0), + layer_attr=cpu_layer_attr, ) crf_decoding = crf_decoding_layer( size=num_label_types, input=crf_input, label=chunk, param_attr=ParamAttr(name="crfw"), - layer_attr=cpu_layer_attr, -) + layer_attr=cpu_layer_attr, ) sum_evaluator( name="error", - input=crf_decoding, -) + input=crf_decoding, ) chunk_evaluator( name="chunk_f1", - input =[crf_decoding, chunk], + input=[crf_decoding, chunk], chunk_scheme="IOB", - num_chunk_types=11, -) + num_chunk_types=11, ) inputs(word, pos, chunk, features) outputs(crf) diff --git a/doc/ui/predict/predict_sample.py b/doc/ui/predict/predict_sample.py index d55d2c730dece..63e8b36d26057 100644 --- a/doc/ui/predict/predict_sample.py +++ b/doc/ui/predict/predict_sample.py @@ -16,82 +16,113 @@ from paddle.trainer.PyDataProvider2 import dense_vector from paddle.trainer.config_parser import parse_config -TEST_DATA = [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.215686, - 0.533333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.67451, - 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.886275, - 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.192157, 0.070588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0.670588, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117647, 0.933333, 0.858824, 0.313725, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0.090196, 0.858824, 0.992157, 0.831373, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.141176, - 0.992157, 0.992157, 0.611765, 0.054902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.258824, 0.992157, 0.992157, - 0.529412, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.368627, 0.992157, 0.992157, 0.419608, 0.003922, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0.094118, 0.835294, 0.992157, 0.992157, 0.517647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.603922, 0.992157, - 0.992157, 0.992157, 0.603922, 0.545098, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.992157, - 0.956863, 0.062745, 0, 0, 0, 0, 0, 0, 0, 0, 0.011765, 0.666667, 0.992157, 0.992157, 0.992157, 0.992157, - 0.992157, 0.745098, 0.137255, 0, 0, 0, 0, 0, 0.152941, 0.866667, 0.992157, 0.992157, 0.521569, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.992157, 0.803922, 0.352941, 0.745098, 0.992157, - 0.945098, 0.317647, 0, 0, 0, 0, 0.580392, 0.992157, 0.992157, 0.764706, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0.070588, 0.992157, 0.992157, 0.776471, 0.043137, 0, 0.007843, 0.27451, 0.882353, 0.941176, 0.176471, - 0, 0, 0.180392, 0.898039, 0.992157, 0.992157, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, - 0.992157, 0.713725, 0, 0, 0, 0, 0.627451, 0.992157, 0.729412, 0.062745, 0, 0.509804, 0.992157, 0.992157, - 0.776471, 0.035294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.494118, 0.992157, 0.992157, 0.968627, 0.168627, 0, 0, - 0, 0.423529, 0.992157, 0.992157, 0.364706, 0, 0.717647, 0.992157, 0.992157, 0.317647, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0.533333, 0.992157, 0.984314, 0.945098, 0.603922, 0, 0, 0, 0.003922, 0.466667, 0.992157, - 0.988235, 0.976471, 0.992157, 0.992157, 0.788235, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.686275, - 0.882353, 0.364706, 0, 0, 0, 0, 0, 0, 0.098039, 0.588235, 0.992157, 0.992157, 0.992157, 0.980392, - 0.305882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.101961, 0.67451, 0.321569, 0, 0, 0, 0, 0, 0, 0, 0.105882, - 0.733333, 0.976471, 0.811765, 0.713725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.65098, 0.992157, - 0.321569, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25098, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - 0.94902, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.968627, - 0.764706, 0.152941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.498039, - 0.25098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [ - [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0.298039, 0.333333, 0.333333, 0.333333, 0.337255, 0.333333, - 0.333333, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027451, 0.223529, 0.776471, - 0.964706, 0.988235, 0.988235, 0.988235, 0.992157, 0.988235, 0.988235, 0.780392, 0.098039, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.14902, 0.698039, 0.988235, 0.992157, 0.988235, 0.901961, 0.87451, - 0.568627, 0.882353, 0.976471, 0.988235, 0.988235, 0.501961, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0.188235, 0.647059, 0.988235, 0.988235, 0.745098, 0.439216, 0.098039, 0, 0, 0, 0.572549, 0.988235, - 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157, 0.941176, - 0.247059, 0, 0, 0, 0, 0, 0, 0.188235, 0.898039, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0.039216, 0.639216, 0.933333, 0.988235, 0.913725, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0.113725, 0.843137, - 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235294, 0.988235, 0.992157, 0.988235, 0.815686, - 0.07451, 0, 0, 0, 0, 0, 0, 0, 0.333333, 0.988235, 0.988235, 0.552941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0.211765, 0.878431, 0.988235, 0.992157, 0.701961, 0.329412, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0.698039, - 0.988235, 0.913725, 0.145098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.890196, 0.988235, 0.988235, - 0.745098, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.882353, 0.988235, 0.568627, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0.2, 0.933333, 0.992157, 0.992157, 0.992157, 0.447059, 0.294118, 0, 0, 0, 0, 0, 0, 0, 0, 0.447059, - 0.992157, 0.768627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.988235, 0.988235, 0.988235, - 0.992157, 0.47451, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.933333, 0.87451, 0.509804, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0.992157, 0.988235, 0.937255, 0.792157, 0.988235, 0.894118, 0.082353, 0, 0, 0, 0, 0, 0, - 0.027451, 0.647059, 0.992157, 0.654902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.913725, - 0.329412, 0.376471, 0.184314, 0, 0, 0, 0, 0, 0, 0.027451, 0.513725, 0.988235, 0.635294, 0.219608, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.929412, 0.988235, 0.988235, 0.741176, 0.309804, 0, 0, 0, 0, - 0, 0, 0.529412, 0.988235, 0.678431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.223529, 0.992157, - 0.992157, 1, 0.992157, 0.992157, 0.992157, 0.992157, 1, 0.992157, 0.992157, 0.882353, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023529, 0.478431, 0.654902, 0.658824, 0.952941, 0.988235, 0.988235, - 0.988235, 0.992157, 0.988235, 0.729412, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0.196078, 0.647059, 0.764706, 0.764706, 0.768627, 0.580392, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0]]] +TEST_DATA = [[[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.215686, 0.533333, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.67451, 0.992157, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0.070588, 0.886275, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.192157, + 0.070588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.670588, 0.992157, + 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117647, 0.933333, 0.858824, 0.313725, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090196, 0.858824, 0.992157, 0.831373, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0.141176, 0.992157, 0.992157, 0.611765, 0.054902, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.258824, 0.992157, 0.992157, 0.529412, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0.368627, 0.992157, 0.992157, 0.419608, 0.003922, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0.094118, 0.835294, 0.992157, 0.992157, 0.517647, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0.603922, 0.992157, 0.992157, 0.992157, 0.603922, + 0.545098, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.992157, + 0.956863, 0.062745, 0, 0, 0, 0, 0, 0, 0, 0, 0.011765, 0.666667, 0.992157, + 0.992157, 0.992157, 0.992157, 0.992157, 0.745098, 0.137255, 0, 0, 0, 0, 0, + 0.152941, 0.866667, 0.992157, 0.992157, 0.521569, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0.070588, 0.992157, 0.992157, 0.992157, 0.803922, 0.352941, 0.745098, + 0.992157, 0.945098, 0.317647, 0, 0, 0, 0, 0.580392, 0.992157, 0.992157, + 0.764706, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, + 0.776471, 0.043137, 0, 0.007843, 0.27451, 0.882353, 0.941176, 0.176471, 0, + 0, 0.180392, 0.898039, 0.992157, 0.992157, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0.070588, 0.992157, 0.992157, 0.713725, 0, 0, 0, 0, 0.627451, + 0.992157, 0.729412, 0.062745, 0, 0.509804, 0.992157, 0.992157, 0.776471, + 0.035294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.494118, 0.992157, 0.992157, + 0.968627, 0.168627, 0, 0, 0, 0.423529, 0.992157, 0.992157, 0.364706, 0, + 0.717647, 0.992157, 0.992157, 0.317647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0.533333, 0.992157, 0.984314, 0.945098, 0.603922, 0, 0, 0, 0.003922, + 0.466667, 0.992157, 0.988235, 0.976471, 0.992157, 0.992157, 0.788235, + 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.686275, 0.882353, 0.364706, 0, + 0, 0, 0, 0, 0, 0.098039, 0.588235, 0.992157, 0.992157, 0.992157, 0.980392, + 0.305882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.101961, 0.67451, 0.321569, + 0, 0, 0, 0, 0, 0, 0, 0.105882, 0.733333, 0.976471, 0.811765, 0.713725, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.65098, 0.992157, 0.321569, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0.25098, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 0.94902, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0.968627, 0.764706, 0.152941, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.498039, 0.25098, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 +]], [[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0.298039, 0.333333, 0.333333, 0.333333, 0.337255, + 0.333333, 0.333333, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0.027451, 0.223529, 0.776471, 0.964706, 0.988235, 0.988235, 0.988235, + 0.992157, 0.988235, 0.988235, 0.780392, 0.098039, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0.14902, 0.698039, 0.988235, 0.992157, 0.988235, 0.901961, + 0.87451, 0.568627, 0.882353, 0.976471, 0.988235, 0.988235, 0.501961, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.647059, 0.988235, 0.988235, + 0.745098, 0.439216, 0.098039, 0, 0, 0, 0.572549, 0.988235, 0.988235, + 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157, + 0.941176, 0.247059, 0, 0, 0, 0, 0, 0, 0.188235, 0.898039, 0.992157, + 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.039216, 0.639216, 0.933333, + 0.988235, 0.913725, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0.113725, 0.843137, + 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235294, 0.988235, + 0.992157, 0.988235, 0.815686, 0.07451, 0, 0, 0, 0, 0, 0, 0, 0.333333, + 0.988235, 0.988235, 0.552941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.211765, + 0.878431, 0.988235, 0.992157, 0.701961, 0.329412, 0.109804, 0, 0, 0, 0, 0, + 0, 0, 0.698039, 0.988235, 0.913725, 0.145098, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0.188235, 0.890196, 0.988235, 0.988235, 0.745098, 0.047059, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0.882353, 0.988235, 0.568627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, + 0.933333, 0.992157, 0.992157, 0.992157, 0.447059, 0.294118, 0, 0, 0, 0, 0, + 0, 0, 0, 0.447059, 0.992157, 0.768627, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0.623529, 0.988235, 0.988235, 0.988235, 0.988235, 0.992157, 0.47451, 0, 0, + 0, 0, 0, 0, 0, 0.188235, 0.933333, 0.87451, 0.509804, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0.992157, 0.988235, 0.937255, 0.792157, 0.988235, 0.894118, + 0.082353, 0, 0, 0, 0, 0, 0, 0.027451, 0.647059, 0.992157, 0.654902, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0.623529, 0.988235, 0.913725, 0.329412, 0.376471, + 0.184314, 0, 0, 0, 0, 0, 0, 0.027451, 0.513725, 0.988235, 0.635294, + 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.929412, 0.988235, + 0.988235, 0.741176, 0.309804, 0, 0, 0, 0, 0, 0, 0.529412, 0.988235, + 0.678431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.223529, 0.992157, + 0.992157, 1, 0.992157, 0.992157, 0.992157, 0.992157, 1, 0.992157, 0.992157, + 0.882353, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.023529, + 0.478431, 0.654902, 0.658824, 0.952941, 0.988235, 0.988235, 0.988235, + 0.992157, 0.988235, 0.729412, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0.196078, 0.647059, 0.764706, 0.764706, 0.768627, + 0.580392, 0.047059, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 +]]] def main(): conf = parse_config("./mnist_model/trainer_config.py", "") print conf.data_config.load_data_args - network = swig_paddle.GradientMachine.createFromConfigProto(conf.model_config) + network = swig_paddle.GradientMachine.createFromConfigProto( + conf.model_config) assert isinstance(network, 
swig_paddle.GradientMachine) # For code hint. network.loadParameters("./mnist_model/") converter = DataProviderConverter([dense_vector(784)]) diff --git a/doc_cn/concepts/trainer_config.py b/doc_cn/concepts/trainer_config.py index 8d8c79fb39e0c..3eccbd7bc11f4 100644 --- a/doc_cn/concepts/trainer_config.py +++ b/doc_cn/concepts/trainer_config.py @@ -1,23 +1,29 @@ from paddle.trainer_config_helpers import * -define_py_data_sources2(train_list='train.list', - test_list='test.list', - module='provider', - obj='process') +define_py_data_sources2( + train_list='train.list', + test_list='test.list', + module='provider', + obj='process') settings( batch_size=128, learning_rate=1e-3, learning_method=AdamOptimizer(), - regularization=L2Regularization(0.5) -) + regularization=L2Regularization(0.5)) img = data_layer(name='pixel', size=28 * 28) -hidden1 = simple_img_conv_pool(input=img, filter_size=3, num_filters=32, pool_size=3, - num_channel=1) +hidden1 = simple_img_conv_pool( + input=img, filter_size=3, num_filters=32, pool_size=3, num_channel=1) -hidden2 = fc_layer(input=hidden1, size=200, act=TanhActivation(), - layer_attr=ExtraAttr(drop_rate=0.5)) +hidden2 = fc_layer( + input=hidden1, + size=200, + act=TanhActivation(), + layer_attr=ExtraAttr(drop_rate=0.5)) predict = fc_layer(input=hidden2, size=10, act=SoftmaxActivation()) -outputs(classification_cost(input=predict, label=data_layer(name='label', size=10))) +outputs( + classification_cost( + input=predict, label=data_layer( + name='label', size=10))) diff --git a/doc_cn/faq/word2vec_config.py b/doc_cn/faq/word2vec_config.py index e347252476eab..866b40c3d4c96 100644 --- a/doc_cn/faq/word2vec_config.py +++ b/doc_cn/faq/word2vec_config.py @@ -1,8 +1,12 @@ -... # the settings and define data provider is omitted. -DICT_DIM=3000 # dictionary dimension. -word_ids=data_layer('word_ids', size=DICT_DIM) +... # the settings and define data provider is omitted. +DICT_DIM = 3000 # dictionary dimension. +word_ids = data_layer('word_ids', size=DICT_DIM) -emb = embedding_layer(input=word_ids, size=256, param_attr=ParamAttr(sparse_update=True)) +emb = embedding_layer( + input=word_ids, size=256, param_attr=ParamAttr(sparse_update=True)) emb_sum = pooling_layer(input=emb, pooling_type=SumPooling()) predict = fc_layer(input=emb_sum, size=DICT_DIM, act=Softmax()) -outputs(classification_cost(input=predict, label=data_layer('label', size=DICT_DIM))) \ No newline at end of file +outputs( + classification_cost( + input=predict, label=data_layer( + 'label', size=DICT_DIM))) diff --git a/doc_cn/faq/word2vec_dataprovider.py b/doc_cn/faq/word2vec_dataprovider.py index a0a39080cece9..ec2753a7d01d7 100644 --- a/doc_cn/faq/word2vec_dataprovider.py +++ b/doc_cn/faq/word2vec_dataprovider.py @@ -1,8 +1,10 @@ -DICT_DIM=3000 +DICT_DIM = 3000 + + @provider(input_types=[integer_sequence(DICT_DIM), integer_value(DICT_DIM)]) def process(settings, filename): - with open(filename) as f: - # yield word ids to predict inner word id - # such as [28, 29, 10, 4], 4 - # It means the sentance is 28, 29, 4, 10, 4. - yield read_next_from_file(f) \ No newline at end of file + with open(filename) as f: + # yield word ids to predict inner word id + # such as [28, 29, 10, 4], 4 + # It means the sentance is 28, 29, 4, 10, 4. 
+ yield read_next_from_file(f) diff --git a/doc_cn/ui/data_provider/mnist_config.py b/doc_cn/ui/data_provider/mnist_config.py index 7ba344338c374..39becff03b08f 100644 --- a/doc_cn/ui/data_provider/mnist_config.py +++ b/doc_cn/ui/data_provider/mnist_config.py @@ -1,8 +1,9 @@ from paddle.trainer_config_helpers import * -define_py_data_sources2(train_list='train.list', - test_list=None, - module='mnist_provider', - obj='process') +define_py_data_sources2( + train_list='train.list', + test_list=None, + module='mnist_provider', + obj='process') img = data_layer(name='pixel', size=784) label = data_layer(name='label', size=10) diff --git a/doc_cn/ui/data_provider/mnist_provider.dict.py b/doc_cn/ui/data_provider/mnist_provider.dict.py index bf13b56372b56..2ba0b126a0d62 100644 --- a/doc_cn/ui/data_provider/mnist_provider.dict.py +++ b/doc_cn/ui/data_provider/mnist_provider.dict.py @@ -2,10 +2,9 @@ # Define a py data provider -@provider(input_types={ - 'pixel': dense_vector(28 * 28), - 'label': integer_value(10) -}) +@provider( + input_types={'pixel': dense_vector(28 * 28), + 'label': integer_value(10)}) def process(settings, filename): # settings is not used currently. f = open(filename, 'r') # open one of training file diff --git a/doc_cn/ui/data_provider/mnist_provider.py b/doc_cn/ui/data_provider/mnist_provider.py index 92f1915c10725..8b828641d5573 100644 --- a/doc_cn/ui/data_provider/mnist_provider.py +++ b/doc_cn/ui/data_provider/mnist_provider.py @@ -2,10 +2,7 @@ # Define a py data provider -@provider(input_types=[ - dense_vector(28 * 28), - integer_value(10) -]) +@provider(input_types=[dense_vector(28 * 28), integer_value(10)]) def process(settings, filename): # settings is not used currently. f = open(filename, 'r') # open one of training file diff --git a/doc_cn/ui/data_provider/sentimental_config.py b/doc_cn/ui/data_provider/sentimental_config.py index 051f75e32b5c0..7ce71608a2372 100644 --- a/doc_cn/ui/data_provider/sentimental_config.py +++ b/doc_cn/ui/data_provider/sentimental_config.py @@ -3,9 +3,12 @@ dictionary = dict() ... # read dictionary from outside -define_py_data_sources2(train_list='train.list', test_list=None, - module='sentimental_provider', obj='process', - # above codes same as mnist sample. - args={ # pass to provider. - 'dictionary': dictionary - }) +define_py_data_sources2( + train_list='train.list', + test_list=None, + module='sentimental_provider', + obj='process', + # above codes same as mnist sample. + args={ # pass to provider. + 'dictionary': dictionary + }) diff --git a/doc_cn/ui/data_provider/sentimental_provider.py b/doc_cn/ui/data_provider/sentimental_provider.py index bda37d7722a0b..0fb0bb88e95a2 100644 --- a/doc_cn/ui/data_provider/sentimental_provider.py +++ b/doc_cn/ui/data_provider/sentimental_provider.py @@ -12,7 +12,8 @@ def on_init(settings, dictionary, **kwargs): # The text is a sequence of integer values, and each value is a word id. # The whole sequence is the sentences that we want to predict its # sentimental. - integer_value(len(dictionary), seq_type=SequenceType), # text input + integer_value( + len(dictionary), seq_type=SequenceType), # text input # label positive/negative integer_value(2) diff --git a/paddle/api/__init__.py b/paddle/api/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/paddle/api/__init__.py +++ b/paddle/api/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- diff --git a/paddle/api/paddle_ld_flags.py b/paddle/api/paddle_ld_flags.py index 05d741f8859ba..ebe00798e8b71 100644 --- a/paddle/api/paddle_ld_flags.py +++ b/paddle/api/paddle_ld_flags.py @@ -29,7 +29,10 @@ whole_start = "" whole_end = "" - LIB_DIRS = ["math", 'utils', 'parameter', "gserver", "api", "cuda", "pserver", "trainer"] + LIB_DIRS = [ + "math", 'utils', 'parameter', "gserver", "api", "cuda", "pserver", + "trainer" + ] PARENT_LIB_DIRS = ['proto'] class PaddleLDFlag(object): @@ -55,19 +58,20 @@ def __init__(self): self.curt = CUDA_LIBRARIES def ldflag_str(self): - return " ".join([self.libs_dir_str(), - self.parent_dir_str(), - self.libs_str()]) + return " ".join( + [self.libs_dir_str(), self.parent_dir_str(), self.libs_str()]) def libs_dir_str(self): libdirs = LIB_DIRS - return " ".join(map(lambda x: "-L" + os.path.join(self.paddle_build_dir, x), - libdirs)) + return " ".join( + map(lambda x: "-L" + os.path.join(self.paddle_build_dir, x), + libdirs)) def parent_dir_str(self): libdirs = PARENT_LIB_DIRS - return " ".join(map(lambda x: "-L" + os.path.join(self.paddle_build_dir, '..', x), - libdirs)) + return " ".join( + map(lambda x: "-L" + os.path.join(self.paddle_build_dir, '..', x), + libdirs)) def libs_str(self): libs = [ @@ -113,10 +117,10 @@ def normalize_flag(self, cmake_flag): return cmake_flag elif cmake_flag.startswith("-l"): # normal link command return cmake_flag - elif cmake_flag in ["gflags-shared", - "gflags-static", - "gflags_nothreads-shared", - "gflags_nothreads-static"]: # special for gflags + elif cmake_flag in [ + "gflags-shared", "gflags-static", "gflags_nothreads-shared", + "gflags_nothreads-static" + ]: # special for gflags assert PaddleLDFlag.cmake_bool(self.gflags_location) return self.gflags_location elif len(cmake_flag) != 0: @@ -132,18 +136,22 @@ def cmake_bool(cmake_str): :type cmake_str: str :rtype: bool """ - if cmake_str in ["FALSE", "OFF", "NO"] or cmake_str.endswith("-NOTFOUND"): + if cmake_str in ["FALSE", "OFF", "NO"] or cmake_str.endswith( + "-NOTFOUND"): return False else: return True + def c_flag(self): if self.with_coverage: return ["-fprofile-arcs", "-ftest-coverage", "-O0", "-g"] else: return None except ImportError: + class PaddleLDFlag(object): def ldflag_str(self): pass + def c_flag(self): pass diff --git a/paddle/api/test/testArguments.py b/paddle/api/test/testArguments.py index daedd2409effc..70fb169fd5c43 100644 --- a/paddle/api/test/testArguments.py +++ b/paddle/api/test/testArguments.py @@ -32,7 +32,7 @@ def test_load_arguments(self): iv = args.getSlotIds(0) assert isinstance(iv, swig_paddle.IVector) np_arr = iv.toNumpyArrayInplace() - self.assertEqual(np_arr.shape, (6,)) + self.assertEqual(np_arr.shape, (6, )) if __name__ == '__main__': diff --git a/paddle/api/test/testGradientMachine.py b/paddle/api/test/testGradientMachine.py index 59b36a012a239..e12613fbb8a66 100644 --- a/paddle/api/test/testGradientMachine.py +++ b/paddle/api/test/testGradientMachine.py @@ -30,8 +30,8 @@ def test_create_gradient_machine(self): self.assertIsNotNone(model_config) machine = swig_paddle.GradientMachine.createByModelConfig( model_config, swig_paddle.CREATE_MODE_NORMAL, - swig_paddle.ParameterOptimizer.create( - opt_config).getParameterTypes()) + swig_paddle.ParameterOptimizer.create(opt_config).getParameterTypes( + )) self.assertIsNotNone(machine) ipt, _ = util.loadMNISTTrainData() output = swig_paddle.Arguments.createArguments(0) @@ -43,7 +43,7 @@ def test_create_gradient_machine(self): assert isinstance(param, swig_paddle.Parameter) val = 
param.getBuf(swig_paddle.PARAMETER_VALUE) assert isinstance(val, swig_paddle.Vector) - arr = numpy.full((len(val),), 0.1, dtype="float32") + arr = numpy.full((len(val), ), 0.1, dtype="float32") val.copyFromNumpyArray(arr) param_config = param.getConfig().toProto() assert isinstance(param_config, diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py index 2216ef30a58b0..11035a9281656 100644 --- a/paddle/api/test/testMatrix.py +++ b/paddle/api/test/testMatrix.py @@ -69,7 +69,8 @@ def test_createDenseMat(self): def test_numpy(self): numpy_mat = np.matrix([[1, 2], [3, 4], [5, 6]], dtype="float32") m = swig_paddle.Matrix.createCpuDenseFromNumpy(numpy_mat) - self.assertEqual((int(m.getHeight()), int(m.getWidth())), numpy_mat.shape) + self.assertEqual( + (int(m.getHeight()), int(m.getWidth())), numpy_mat.shape) # the numpy matrix and paddle matrix shared the same memory. numpy_mat[0, 1] = 342.23 diff --git a/paddle/api/test/testTrain.py b/paddle/api/test/testTrain.py index 7759118a3d9d1..a3ba4eaaa69b3 100644 --- a/paddle/api/test/testTrain.py +++ b/paddle/api/test/testTrain.py @@ -98,7 +98,8 @@ def update_callback(param): cost_vec = outArgs.getSlotValue(0) assert isinstance(cost_vec, swig_paddle.Matrix) cost_vec = cost_vec.copyToNumpyMat() - print 'Finish Batch', batch_id, 'with cost ', cost_vec.sum() / batch_size + print 'Finish Batch', batch_id, 'with cost ', cost_vec.sum( + ) / batch_size batch_id += 1 for optimizer in optimizers: diff --git a/paddle/api/test/testTrainConfig.py b/paddle/api/test/testTrainConfig.py index 22148e31915da..77e0cd37d566d 100644 --- a/paddle/api/test/testTrainConfig.py +++ b/paddle/api/test/testTrainConfig.py @@ -1,9 +1,6 @@ from paddle.trainer_config_helpers import * -settings( - batch_size=100, - learning_method=AdamOptimizer() -) +settings(batch_size=100, learning_method=AdamOptimizer()) din = data_layer(name='input', size=784) diff --git a/paddle/api/test/testTrainer.py b/paddle/api/test/testTrainer.py index da69a60f84f4d..edd5a2da5785c 100644 --- a/paddle/api/test/testTrainer.py +++ b/paddle/api/test/testTrainer.py @@ -17,9 +17,9 @@ from py_paddle import swig_paddle import util + def main(): - trainer_config = parse_config( - "./testTrainConfig.py", "") + trainer_config = parse_config("./testTrainConfig.py", "") model = swig_paddle.GradientMachine.createFromConfigProto( trainer_config.model_config) trainer = swig_paddle.Trainer.create(trainer_config, model) @@ -56,7 +56,7 @@ def main(): logger.info('test cost=%f' % (cost / num)) trainer.finishTrain() - + if __name__ == '__main__': swig_paddle.initPaddle("--use_gpu=0", "--trainer_count=1") diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py index f5b5d0e32e420..5226df79eea3b 100644 --- a/paddle/api/test/testVector.py +++ b/paddle/api/test/testVector.py @@ -112,5 +112,6 @@ def testCopyFromNumpy(self): if __name__ == '__main__': - swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0") + swig_paddle.initPaddle("--use_gpu=1" + if swig_paddle.isGpuVersion() else "--use_gpu=0") unittest.main() diff --git a/paddle/gserver/tests/__init__.py b/paddle/gserver/tests/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/paddle/gserver/tests/__init__.py +++ b/paddle/gserver/tests/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- diff --git a/paddle/gserver/tests/pyDataProvider.py b/paddle/gserver/tests/pyDataProvider.py index c3155e7adea04..91863b4175b1a 100644 --- a/paddle/gserver/tests/pyDataProvider.py +++ b/paddle/gserver/tests/pyDataProvider.py @@ -16,72 +16,79 @@ import struct import traceback + def header_creator(): ret = "" - ret += struct.pack('i', 3) # slot num - ret += struct.pack('i', 1) # sequence flag - ret += struct.pack('i', 0) # slot0 dense type - ret += struct.pack('i', 3) # slot0 dim - ret += struct.pack('i', 1) # slot1 sparse non value type - ret += struct.pack('i', 7) # slot1 dim - ret += struct.pack('i', 3) # slot2 index type - ret += struct.pack('i', 2) # slot2 dim + ret += struct.pack('i', 3) # slot num + ret += struct.pack('i', 1) # sequence flag + ret += struct.pack('i', 0) # slot0 dense type + ret += struct.pack('i', 3) # slot0 dim + ret += struct.pack('i', 1) # slot1 sparse non value type + ret += struct.pack('i', 7) # slot1 dim + ret += struct.pack('i', 3) # slot2 index type + ret += struct.pack('i', 2) # slot2 dim return ret + def dense_value_creator(sample_num): ret = "" - ret += struct.pack('i', sample_num) # slot0 sample num - for i in range(sample_num): # slot0 value + ret += struct.pack('i', sample_num) # slot0 sample num + for i in range(sample_num): # slot0 value ret += struct.pack('f', 1.0) ret += struct.pack('f', 2.0) ret += struct.pack('f', 3.0) return ret + def sparse_value_creator(sample_num): ret = "" - ret += struct.pack('i', sample_num) # slot1 sample num - for i in range(sample_num): # slot1 index + ret += struct.pack('i', sample_num) # slot1 sample num + for i in range(sample_num): # slot1 index ret += struct.pack('i', i * 2) - ret += struct.pack('i', sample_num * 2) #slot1 length - for i in range(sample_num): # slot1 value + ret += struct.pack('i', sample_num * 2) #slot1 length + for i in range(sample_num): # slot1 value ret += struct.pack('i', 1) ret += struct.pack('i', 2) return ret + def index_value_creator(sample_num): ret = "" - ret += struct.pack('i', sample_num) # slot2 sample num - for i in range(sample_num): # slot2 value + ret += struct.pack('i', sample_num) # slot2 sample num + for i in range(sample_num): # slot2 value ret += struct.pack('i', 0) return ret + def sequenceStartPositions_creator(): ret = "" - ret += struct.pack('i', 2) # slot0 sequence num - ret += struct.pack('i', 0) # slot0 sequence value1 - ret += struct.pack('i', 1) # slot0 sequence value2 - ret += struct.pack('i', 1) # slot1 sequence num - ret += struct.pack('i', 0) # slot1 sequence value1 - ret += struct.pack('i', 2) # slot2 sequence num - ret += struct.pack('i', 0) # slot2 sequence value1 - ret += struct.pack('i', 1) # slot2 sequence value2 + ret += struct.pack('i', 2) # slot0 sequence num + ret += struct.pack('i', 0) # slot0 sequence value1 + ret += struct.pack('i', 1) # slot0 sequence value2 + ret += struct.pack('i', 1) # slot1 sequence num + ret += struct.pack('i', 0) # slot1 sequence value1 + ret += struct.pack('i', 2) # slot2 sequence num + ret += struct.pack('i', 0) # slot2 sequence value1 + ret += struct.pack('i', 1) # slot2 sequence value2 return ret + def subSequenceStartPositions_creator(): ret = "" - ret += struct.pack('i', 3) # slot0 subsequence num - ret += struct.pack('i', 0) # slot0 subsequence value1 - ret += struct.pack('i', 1) # slot0 subsequence value2 - ret += struct.pack('i', 2) # slot0 subsequence value3 - ret += struct.pack('i', 2) # slot1 subsequence num - ret += struct.pack('i', 0) # slot1 subsequence value1 - ret += struct.pack('i', 1) # slot1 subsequence 
value2 - ret += struct.pack('i', 3) # slot2 subsequence num - ret += struct.pack('i', 0) # slot2 subsequence value1 - ret += struct.pack('i', 1) # slot2 subsequence value2 - ret += struct.pack('i', 2) # slot2 subsequence value3 + ret += struct.pack('i', 3) # slot0 subsequence num + ret += struct.pack('i', 0) # slot0 subsequence value1 + ret += struct.pack('i', 1) # slot0 subsequence value2 + ret += struct.pack('i', 2) # slot0 subsequence value3 + ret += struct.pack('i', 2) # slot1 subsequence num + ret += struct.pack('i', 0) # slot1 subsequence value1 + ret += struct.pack('i', 1) # slot1 subsequence value2 + ret += struct.pack('i', 3) # slot2 subsequence num + ret += struct.pack('i', 0) # slot2 subsequence value1 + ret += struct.pack('i', 1) # slot2 subsequence value2 + ret += struct.pack('i', 2) # slot2 subsequence value3 return ret + class SimpleDataProvider: def __init__(self, *file_list): self.file_list = file_list @@ -93,17 +100,18 @@ def reset(self): pass def getHeader(self): - return header_creator() + return header_creator() def getNextBatch(self, batch_size): ret = "" - ret += struct.pack('i', 2) # batch size - ret += dense_value_creator(2) # slot0 - ret += sparse_value_creator(2) # slot1 - ret += index_value_creator(2) # slot2 + ret += struct.pack('i', 2) # batch size + ret += dense_value_creator(2) # slot0 + ret += sparse_value_creator(2) # slot1 + ret += index_value_creator(2) # slot2 ret += sequenceStartPositions_creator() return ret + class SimpleNestDataProvider: def __init__(self, *file_list): self.file_list = file_list @@ -119,14 +127,15 @@ def getHeader(self): def getNextBatch(self, batch_size): ret = "" - ret += struct.pack('i', 2) # batch size - ret += dense_value_creator(4) # slot0 - ret += sparse_value_creator(4) # slot1 - ret += index_value_creator(4) # slot2 + ret += struct.pack('i', 2) # batch size + ret += dense_value_creator(4) # slot0 + ret += sparse_value_creator(4) # slot1 + ret += index_value_creator(4) # slot2 ret += sequenceStartPositions_creator() ret += subSequenceStartPositions_creator() return ret + if __name__ == "__main__": # test code data_provider = SimpleDataProvider('./test_batch') diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/gserver/tests/rnn_data_provider.py index 321c78cb1741b..715ac08a42d05 100644 --- a/paddle/gserver/tests/rnn_data_provider.py +++ b/paddle/gserver/tests/rnn_data_provider.py @@ -22,18 +22,20 @@ [[[0, 2], [2, 5], [0, 1, 2]], 1], ] + # Used for sequence_nest_rnn.conf -@provider(input_types=[integer_value_sub_sequence(10), - integer_value(3)], - should_shuffle=False) +@provider( + input_types=[integer_value_sub_sequence(10), integer_value(3)], + should_shuffle=False) def process_subseq(settings, file_name): for d in data: yield d + # Used for sequence_rnn.conf -@provider(input_types=[integer_value_sequence(10), - integer_value(3)], - should_shuffle=False) +@provider( + input_types=[integer_value_sequence(10), integer_value(3)], + should_shuffle=False) def process_seq(settings, file_name): for d in data: seq = [] @@ -41,18 +43,20 @@ def process_seq(settings, file_name): seq += subseq yield seq, d[1] + # Used for sequence_nest_rnn_multi_input.conf -@provider(input_types=[integer_value_sub_sequence(10), - integer_value(3)], - should_shuffle=False) +@provider( + input_types=[integer_value_sub_sequence(10), integer_value(3)], + should_shuffle=False) def process_subseq2(settings, file_name): for d in data: yield d + # Used for sequence_rnn_multi_input.conf -@provider(input_types=[integer_value_sequence(10), - 
integer_value(3)], - should_shuffle=False) +@provider( + input_types=[integer_value_sequence(10), integer_value(3)], + should_shuffle=False) def process_seq2(settings, file_name): for d in data: seq = [] @@ -60,31 +64,34 @@ def process_seq2(settings, file_name): seq += subseq yield seq, d[1] + ########################################################### data2 = [ - [[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]] ,0], - [[[0, 2], [2, 5], [0, 1, 2]],[[1, 5], [4], [2, 3, 6, 1]], 1], + [[[1, 2], [4, 5, 2]], [[5, 4, 1], [3, 1]], 0], + [[[0, 2], [2, 5], [0, 1, 2]], [[1, 5], [4], [2, 3, 6, 1]], 1], ] + # Used for sequence_nest_rnn_multi_unequalength_inputs.conf -@provider(input_types=[integer_value_sub_sequence(10), - integer_value_sub_sequence(10), - integer_value(2)], - should_shuffle=False) +@provider( + input_types=[ + integer_value_sub_sequence(10), integer_value_sub_sequence(10), + integer_value(2) + ], + should_shuffle=False) def process_unequalength_subseq(settings, file_name): for d in data2: yield d # Used for sequence_rnn_multi_unequalength_inputs.conf -@provider(input_types=[integer_value_sequence(10), - integer_value_sequence(10), - integer_value(2)], - should_shuffle=False) +@provider( + input_types=[ + integer_value_sequence(10), integer_value_sequence(10), integer_value(2) + ], + should_shuffle=False) def process_unequalength_seq(settings, file_name): for d in data2: - words1=reduce(lambda x,y: x+y, d[0]) - words2=reduce(lambda x,y: x+y, d[1]) + words1 = reduce(lambda x, y: x + y, d[0]) + words2 = reduce(lambda x, y: x + y, d[1]) yield words1, words2, d[2] - - diff --git a/paddle/gserver/tests/sequenceGen.py b/paddle/gserver/tests/sequenceGen.py index b166e778d7a33..fab876fd30da0 100644 --- a/paddle/gserver/tests/sequenceGen.py +++ b/paddle/gserver/tests/sequenceGen.py @@ -20,8 +20,9 @@ def hook(settings, dict_file, **kwargs): settings.word_dict = dict_file - settings.input_types = [integer_value_sequence(len(settings.word_dict)), - integer_value(3)] + settings.input_types = [ + integer_value_sequence(len(settings.word_dict)), integer_value(3) + ] settings.logger.info('dict len : %d' % (len(settings.word_dict))) @@ -32,16 +33,19 @@ def process(settings, file_name): label, comment = line.strip().split('\t') label = int(''.join(label.split())) words = comment.split() - word_slot = [settings.word_dict[w] for w in words if - w in settings.word_dict] + word_slot = [ + settings.word_dict[w] for w in words if w in settings.word_dict + ] yield word_slot, label ## for hierarchical sequence network def hook2(settings, dict_file, **kwargs): settings.word_dict = dict_file - settings.input_types = [integer_value_sub_sequence(len(settings.word_dict)), - integer_value_sequence(3)] + settings.input_types = [ + integer_value_sub_sequence(len(settings.word_dict)), + integer_value_sequence(3) + ] settings.logger.info('dict len : %d' % (len(settings.word_dict))) @@ -55,8 +59,10 @@ def process2(settings, file_name): label, comment = line.strip().split('\t') label = int(''.join(label.split())) words = comment.split() - word_slot = [settings.word_dict[w] for w in words if - w in settings.word_dict] + word_slot = [ + settings.word_dict[w] for w in words + if w in settings.word_dict + ] label_list.append(label) word_slot_list.append(word_slot) else: diff --git a/paddle/gserver/tests/sequence_layer_group.conf b/paddle/gserver/tests/sequence_layer_group.conf index ac031b31280df..087aa96ccb5a7 100644 --- a/paddle/gserver/tests/sequence_layer_group.conf +++ b/paddle/gserver/tests/sequence_layer_group.conf @@ -21,15 
+21,16 @@ dict_file = dict() for line_count, line in enumerate(open(dict_path, "r")): dict_file[line.strip()] = line_count -define_py_data_sources2(train_list='gserver/tests/Sequence/train.list', - test_list=None, - module='sequenceGen', - obj='process', - args={"dict_file":dict_file}) +define_py_data_sources2( + train_list='gserver/tests/Sequence/train.list', + test_list=None, + module='sequenceGen', + obj='process', + args={"dict_file": dict_file}) settings(batch_size=5) ######################## network configure ################################ -dict_dim = len(open(dict_path,'r').readlines()) +dict_dim = len(open(dict_path, 'r').readlines()) word_dim = 128 hidden_dim = 256 label_dim = 3 @@ -39,21 +40,24 @@ data = data_layer(name="word", size=dict_dim) emb = embedding_layer(input=data, size=word_dim) # (lstm_input + lstm) is equal to lstmemory -with mixed_layer(size=hidden_dim*4) as lstm_input: +with mixed_layer(size=hidden_dim * 4) as lstm_input: lstm_input += full_matrix_projection(input=emb) -lstm = lstmemory_group(input=lstm_input, - size=hidden_dim, - act=TanhActivation(), - gate_act=SigmoidActivation(), - state_act=TanhActivation(), - lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) +lstm = lstmemory_group( + input=lstm_input, + size=hidden_dim, + act=TanhActivation(), + gate_act=SigmoidActivation(), + state_act=TanhActivation(), + lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) lstm_last = last_seq(input=lstm) -with mixed_layer(size=label_dim, - act=SoftmaxActivation(), - bias_attr=True) as output: +with mixed_layer( + size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output: output += full_matrix_projection(input=lstm_last) -outputs(classification_cost(input=output, label=data_layer(name="label", size=1))) +outputs( + classification_cost( + input=output, label=data_layer( + name="label", size=1))) diff --git a/paddle/gserver/tests/sequence_nest_layer_group.conf b/paddle/gserver/tests/sequence_nest_layer_group.conf index 38c60b657b969..93a0f6da7905c 100644 --- a/paddle/gserver/tests/sequence_nest_layer_group.conf +++ b/paddle/gserver/tests/sequence_nest_layer_group.conf @@ -21,15 +21,16 @@ dict_file = dict() for line_count, line in enumerate(open(dict_path, "r")): dict_file[line.strip()] = line_count -define_py_data_sources2(train_list='gserver/tests/Sequence/train.list.nest', - test_list=None, - module='sequenceGen', - obj='process2', - args={"dict_file":dict_file}) +define_py_data_sources2( + train_list='gserver/tests/Sequence/train.list.nest', + test_list=None, + module='sequenceGen', + obj='process2', + args={"dict_file": dict_file}) settings(batch_size=2) ######################## network configure ################################ -dict_dim = len(open(dict_path,'r').readlines()) +dict_dim = len(open(dict_path, 'r').readlines()) word_dim = 128 hidden_dim = 256 label_dim = 3 @@ -38,37 +39,46 @@ data = data_layer(name="word", size=dict_dim) emb_group = embedding_layer(input=data, size=word_dim) + # (lstm_input + lstm) is equal to lstmemory def lstm_group(lstm_group_input): - with mixed_layer(size=hidden_dim*4) as group_input: - group_input += full_matrix_projection(input=lstm_group_input) + with mixed_layer(size=hidden_dim * 4) as group_input: + group_input += full_matrix_projection(input=lstm_group_input) - lstm_output = lstmemory_group(input=group_input, - name="lstm_group", - size=hidden_dim, - act=TanhActivation(), - gate_act=SigmoidActivation(), - state_act=TanhActivation(), - 
lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) + lstm_output = lstmemory_group( + input=group_input, + name="lstm_group", + size=hidden_dim, + act=TanhActivation(), + gate_act=SigmoidActivation(), + state_act=TanhActivation(), + lstm_layer_attr=ExtraLayerAttribute(error_clipping_threshold=50)) return lstm_output -lstm_nest_group = recurrent_group(input=SubsequenceInput(emb_group), - step=lstm_group, - name="lstm_nest_group") + +lstm_nest_group = recurrent_group( + input=SubsequenceInput(emb_group), step=lstm_group, name="lstm_nest_group") # hasSubseq ->(seqlastins) seq -lstm_last = last_seq(input=lstm_nest_group, agg_level=AggregateLevel.EACH_SEQUENCE) +lstm_last = last_seq( + input=lstm_nest_group, agg_level=AggregateLevel.EACH_SEQUENCE) # seq ->(expand) hasSubseq -lstm_expand = expand_layer(input=lstm_last, expand_as=emb_group, expand_level=ExpandLevel.FROM_SEQUENCE) +lstm_expand = expand_layer( + input=lstm_last, + expand_as=emb_group, + expand_level=ExpandLevel.FROM_SEQUENCE) # hasSubseq ->(average) seq -lstm_average = pooling_layer(input=lstm_expand, - pooling_type=AvgPooling(), - agg_level=AggregateLevel.EACH_SEQUENCE) +lstm_average = pooling_layer( + input=lstm_expand, + pooling_type=AvgPooling(), + agg_level=AggregateLevel.EACH_SEQUENCE) -with mixed_layer(size=label_dim, - act=SoftmaxActivation(), - bias_attr=True) as output: +with mixed_layer( + size=label_dim, act=SoftmaxActivation(), bias_attr=True) as output: output += full_matrix_projection(input=lstm_average) -outputs(classification_cost(input=output, label=data_layer(name="label", size=1))) +outputs( + classification_cost( + input=output, label=data_layer( + name="label", size=1))) diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/gserver/tests/test_PyDataProvider2.py index 71c3335231e52..7ca30198fb1d0 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.py +++ b/paddle/gserver/tests/test_PyDataProvider2.py @@ -33,16 +33,19 @@ def test_init_hooker(setting, value, **kwargs): setting.value = value -@provider(input_types=[dense_vector(20, seq_type=SequenceType.NO_SEQUENCE)], - init_hook=test_init_hooker) +@provider( + input_types=[dense_vector( + 20, seq_type=SequenceType.NO_SEQUENCE)], + init_hook=test_init_hooker) def test_init_hook(setting, filename): for i in xrange(200): yield setting.value -@provider( - input_types=[ - sparse_binary_vector(30000, seq_type=SequenceType.NO_SEQUENCE)]) +@provider(input_types=[ + sparse_binary_vector( + 30000, seq_type=SequenceType.NO_SEQUENCE) +]) def test_sparse_non_value_no_seq(setting, filename): for i in xrange(200): yield [(i + 1) * (j + 1) for j in xrange(10)] @@ -77,28 +80,28 @@ def test_min_pool_size(setting, filename): yield random.randint(0, 100 - 1) -@provider(input_types=[index_slot(100, seq_type=SequenceType.SEQUENCE)], - can_over_batch_size=False, - calc_batch_size=lambda x: len(x[0])) +@provider( + input_types=[index_slot( + 100, seq_type=SequenceType.SEQUENCE)], + can_over_batch_size=False, + calc_batch_size=lambda x: len(x[0])) def test_can_over_batch_size(setting, filename): for _ in xrange(1 << 10): seq_len = random.randint(0, 99) yield [random.randint(0, 100 - 1) for _ in xrange(seq_len)] -@provider(input_types={'input1':index_slot(10), 'input2': index_slot(10)}) +@provider(input_types={'input1': index_slot(10), 'input2': index_slot(10)}) def test_input_order(setting, filename): for _ in xrange(1000): - yield { - 'input1': 0, - 'input2': 1 - } + yield {'input1': 0, 'input2': 1} -@provider(input_types=[index_slot(10)], - check=True, - 
check_fail_continue=True, - should_shuffle="123") # also test should shuffle +@provider( + input_types=[index_slot(10)], + check=True, + check_fail_continue=True, + should_shuffle="123") # also test should shuffle def test_check(settings, filename): yield_good_value = False @@ -108,4 +111,3 @@ def test_check(settings, filename): if i < 10: yield_good_value = True yield i - diff --git a/paddle/py_paddle/__init__.py b/paddle/py_paddle/__init__.py index f372068942ea3..f8399f9c63d81 100644 --- a/paddle/py_paddle/__init__.py +++ b/paddle/py_paddle/__init__.py @@ -15,9 +15,10 @@ from util import DataProviderWrapperConverter from dataprovider_converter import DataProviderConverter -__all__ = ['paddle', - 'DataProviderConverter', - 'DataProviderWrapperConverter', # for deprecated usage. - 'loadParameterFile'] +__all__ = [ + 'paddle', + 'DataProviderConverter', + 'DataProviderWrapperConverter', # for deprecated usage. + 'loadParameterFile' +] util.monkeypatches() - diff --git a/paddle/py_paddle/dataprovider_converter.py b/paddle/py_paddle/dataprovider_converter.py index dd2e146d112c0..d64c7b20cb65a 100644 --- a/paddle/py_paddle/dataprovider_converter.py +++ b/paddle/py_paddle/dataprovider_converter.py @@ -45,10 +45,8 @@ def scan(self, dat): def finish_scan(self, argument): assert isinstance(argument, swig_paddle.Arguments) assert isinstance(self.input_type, dp2.InputType) - m = swig_paddle.Matrix.createDense(self.__mat__, - self.__height__, - self.input_type.dim, - False) + m = swig_paddle.Matrix.createDense(self.__mat__, self.__height__, + self.input_type.dim, False) argument.setSlotValue(self.pos, m) @@ -141,8 +139,10 @@ def convert(self, dat, argument=None): assert isinstance(argument, swig_paddle.Arguments) argument.resize(len(self.input_types)) - scanners = [DataProviderConverter.create_scanner(i, each_type) - for i, each_type in enumerate(self.input_types)] + scanners = [ + DataProviderConverter.create_scanner(i, each_type) + for i, each_type in enumerate(self.input_types) + ] for each_sample in dat: for each_step, scanner in zip(each_sample, scanners): @@ -171,11 +171,14 @@ def create_scanner(i, each): assert retv is not None if each.seq_type == dp2.SequenceType.SUB_SEQUENCE: - retv = SequenceScanner(each, i, retv, lambda a, p, seq: - a.setSlotSubSequenceStartPositions(p, seq)) - - if each.seq_type in [dp2.SequenceType.SUB_SEQUENCE, - dp2.SequenceType.SEQUENCE]: - retv = SequenceScanner(each, i, retv, lambda a, p, seq: - a.setSlotSequenceStartPositions(p, seq)) + retv = SequenceScanner( + each, i, retv, + lambda a, p, seq: a.setSlotSubSequenceStartPositions(p, seq)) + + if each.seq_type in [ + dp2.SequenceType.SUB_SEQUENCE, dp2.SequenceType.SEQUENCE + ]: + retv = SequenceScanner( + each, i, retv, + lambda a, p, seq: a.setSlotSequenceStartPositions(p, seq)) return retv diff --git a/paddle/py_paddle/util.py b/paddle/py_paddle/util.py index 53f67a861e7d9..8ebcb346100c2 100644 --- a/paddle/py_paddle/util.py +++ b/paddle/py_paddle/util.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Some Useful method for py_paddle. 
""" @@ -79,6 +78,7 @@ def wrap(callback): else: return __ParameterCallbackWrapper__(callback).__disown__() + def __arguments_to_numpy__(i, arg): assert isinstance(arg, swig_paddle.Arguments) value = arg.getSlotValue(i) @@ -89,10 +89,8 @@ def __arguments_to_numpy__(i, arg): if ids is not None: assert isinstance(ids, swig_paddle.IVector) ids = ids.copyToNumpyArray() - return { - "value": value, - "id": ids - } + return {"value": value, "id": ids} + def __monkeypatch_gradient_machine__(): """ @@ -102,7 +100,6 @@ def __monkeypatch_gradient_machine__(): swig_paddle.GradientMachine.loadFromConfigFile = \ staticmethod(loadGradientMachine) - def __matrix_to_numpy__(m): if isinstance(m, swig_paddle.Matrix): return m.copyToNumpyMat() @@ -113,9 +110,11 @@ def __matrix_to_numpy__(m): def createFromConfigProto(protoObj, createMode=swig_paddle.CREATE_MODE_NORMAL, - paramTypes=[swig_paddle.PARAMETER_VALUE, - swig_paddle.PARAMETER_GRADIENT, - swig_paddle.PARAMETER_MOMENTUM]): + paramTypes=[ + swig_paddle.PARAMETER_VALUE, + swig_paddle.PARAMETER_GRADIENT, + swig_paddle.PARAMETER_MOMENTUM + ]): """ Create Gradient Machine From Proto object. :param protoObj: Model config @@ -145,8 +144,10 @@ def forwardTest(self, inArgs): """ outArgs = swig_paddle.Arguments.createArguments(0) self.forward(inArgs, outArgs, swig_paddle.PASS_TEST) - return [__arguments_to_numpy__(i, outArgs) for i in xrange( - outArgs.getSlotNum())] + return [ + __arguments_to_numpy__(i, outArgs) + for i in xrange(outArgs.getSlotNum()) + ] swig_paddle.GradientMachine.forwardTest = forwardTest @@ -167,7 +168,10 @@ def backward(self, callback): swig_paddle.GradientMachine.__forwardBackward__ = \ swig_paddle.GradientMachine.forwardBackward - def forwardBackward(self, inArgs, outArgs, passType, + def forwardBackward(self, + inArgs, + outArgs, + passType, callback=swig_paddle.UpdateCallback()): """ GradientMachine forward backward. 
@@ -315,9 +319,8 @@ def append(self, other): self.cols += other def __call__(self, slot_idx, arg): - mat = swig_paddle.Matrix.createSparse(len(self.indices) - 1, - self.dim, - len(self.cols), True) + mat = swig_paddle.Matrix.createSparse( + len(self.indices) - 1, self.dim, len(self.cols), True) assert isinstance(mat, swig_paddle.Matrix) mat.sparseCopyFrom(self.indices, self.cols) self.putIntoArg(slot_idx, arg, mat) @@ -341,9 +344,8 @@ def append(self, other): self.values += map(lambda x: x[1], other) def __call__(self, slot_idx, arg): - mat = swig_paddle.Matrix.createSparse(len(self.indices) - 1, - self.dim, - len(self.cols), False) + mat = swig_paddle.Matrix.createSparse( + len(self.indices) - 1, self.dim, len(self.cols), False) assert isinstance(mat, swig_paddle.Matrix) mat.sparseCopyFrom(self.indices, self.cols, self.values) self.putIntoArg(slot_idx, arg, mat) @@ -352,8 +354,9 @@ def __call__(self, slot_idx, arg): paddle.trainer.PyDataProviderWrapper.DenseSlot: DenseValueConverter, paddle.trainer.PyDataProviderWrapper.IndexSlot: IdValueConverter, paddle.trainer.PyDataProviderWrapper.SparseNonValueSlot: - SparseNonValueConverter, - paddle.trainer.PyDataProviderWrapper.SparseValueSlot: SparseValueConverter + SparseNonValueConverter, + paddle.trainer.PyDataProviderWrapper.SparseValueSlot: + SparseValueConverter } def __init__(self, use_seq, header): @@ -381,10 +384,9 @@ def convert(self, wrapper_data, argument=None): assert isinstance(argument, swig_paddle.Arguments) argument.resize(len(self.__header__)) - values = map(lambda x: - DataProviderWrapperConverter.__SLOT_VALUE_CONVERTER_MAP__[ - x.__class__](x), - self.__header__) + values = map( + lambda x: DataProviderWrapperConverter.__SLOT_VALUE_CONVERTER_MAP__[x.__class__](x), + self.__header__) if self.__use_seq__: seq_dim = [[] for _ in xrange(self.__header__.__len__())] @@ -394,14 +396,13 @@ def convert(self, wrapper_data, argument=None): for slot_idx, sequence in enumerate(each_sample): for raw_data in sequence: values[slot_idx].append(raw_data) - seq_start_pos[slot_idx].append( - seq_start_pos[slot_idx][-1] + len(sequence)) + seq_start_pos[slot_idx].append(seq_start_pos[slot_idx][-1] + + len(sequence)) seq_dim[slot_idx].append(len(sequence)) for slot_idx in xrange(len(self.__header__)): - argument.setSlotSequenceDim(slot_idx, - swig_paddle.IVector.create( - seq_dim[slot_idx])) + argument.setSlotSequenceDim( + slot_idx, swig_paddle.IVector.create(seq_dim[slot_idx])) argument.setSlotSequenceStartPositions( slot_idx, swig_paddle.IVector.create(seq_start_pos[slot_idx])) @@ -422,7 +423,6 @@ def __call__(self, wrapper_data, argument=None): return self.convert(wrapper_data, argument) - def __monkey_patch_protobuf_objects__(): def ParameterConfig_toProto(self): """ @@ -459,8 +459,7 @@ def OptimizationConfig_createFromProto(protoObj): :return: paddle.OptimizationConfig """ - assert isinstance(protoObj, - paddle.proto.OptimizationConfig) + assert isinstance(protoObj, paddle.proto.OptimizationConfig) return swig_paddle.OptimizationConfig.createFromProtoString( protoObj.SerializeToString()) @@ -475,8 +474,7 @@ def TrainerConfig_createFromProto(protoObj): :param protoObj: proto.TrainerConfig :return: paddle.TrainerConfig """ - assert isinstance(protoObj, - paddle.proto.TrainerConfig) + assert isinstance(protoObj, paddle.proto.TrainerConfig) return swig_paddle.TrainerConfig.createFromProtoString( protoObj.SerializeToString()) @@ -537,6 +535,7 @@ def Trainer_create(config, model=None): assert isinstance(model, swig_paddle.GradientMachine) return 
swig_paddle.Trainer.__create__( swig_paddle.TrainerConfig.createFromProto(config), model) + swig_paddle.Trainer.create = staticmethod(Trainer_create) swig_paddle.Trainer.__getForwardOutput__ = \ @@ -551,14 +550,19 @@ def getForwardOutput(self): numpy.ndarray. """ outArgs = self.__getForwardOutput__() - return [__arguments_to_numpy__(i, outArgs) for i in xrange( - outArgs.getSlotNum())] + return [ + __arguments_to_numpy__(i, outArgs) + for i in xrange(outArgs.getSlotNum()) + ] swig_paddle.Trainer.getForwardOutput = getForwardOutput + def monkeypatches(): - patches = [__monkeypatch_init_paddle__, __monkeypatch_gradient_machine__, - __monkey_patch_protobuf_objects__, - __monkey_patch_parameter__, __monkey_patch_trainer__] + patches = [ + __monkeypatch_init_paddle__, __monkeypatch_gradient_machine__, + __monkey_patch_protobuf_objects__, __monkey_patch_parameter__, + __monkey_patch_trainer__ + ] for patch in patches: patch() diff --git a/paddle/scripts/cluster_train/conf.py b/paddle/scripts/cluster_train/conf.py index c8fd360e7552e..f1114a59201b9 100644 --- a/paddle/scripts/cluster_train/conf.py +++ b/paddle/scripts/cluster_train/conf.py @@ -13,17 +13,14 @@ # limitations under the License. HOSTS = [ - "root@192.168.100.17", - "root@192.168.100.18", - ] - + "root@192.168.100.17", + "root@192.168.100.18", +] ''' workspace configuration ''' #root dir for workspace, can be set as any director with real user account ROOT_DIR = "/home/paddle" - - ''' network configuration ''' @@ -37,4 +34,4 @@ PADDLE_PORTS_NUM_FOR_SPARSE = 2 #environments setting for all processes in cluster job -LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib64" +LD_LIBRARY_PATH = "/usr/local/cuda/lib64:/usr/lib64" diff --git a/paddle/scripts/cluster_train/paddle.py b/paddle/scripts/cluster_train/paddle.py index 79698c72e619f..7343a600c1bf5 100644 --- a/paddle/scripts/cluster_train/paddle.py +++ b/paddle/scripts/cluster_train/paddle.py @@ -12,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - - """ module for launching cluster job """ import os @@ -23,13 +21,13 @@ import time import signal - from fabric.api import run, put, settings, env, prefix from fabric.tasks import execute #configuration for cluster import conf + def refine_unknown_args(cmd_args): ''' refine unknown parameters to handle some special parameters @@ -37,7 +35,7 @@ def refine_unknown_args(cmd_args): new_args = [] for arg in cmd_args: if arg.startswith("--") and arg.find("=") != -1: - equal_pos = arg.find("=") #find first = pos + equal_pos = arg.find("=") #find first = pos arglist = list(arg) arglist[equal_pos] = " " arg = "".join(arglist) @@ -50,6 +48,7 @@ def refine_unknown_args(cmd_args): new_args.append(arg) return new_args + def kill_process(): ''' kill comments threads @@ -60,6 +59,7 @@ def kill_process(): | awk '{print $2}' \ | xargs kill > /dev/null 2>&1") + def job_prepare(jobdir, data=None): ''' prepare job related workspace data @@ -70,6 +70,7 @@ def job_prepare(jobdir, data=None): This function just prepare all related model and other resources needed at runtime. ''' + def job_create_workspace(jobdir, data=None): ''' prepare job workspace, common file, etc. 
@@ -94,7 +95,8 @@ def set_nodefile(nodeid): execute(set_nodefile, i, hosts=conf.HOSTS[i]) #clean rubbish caused by exception with settings(warn_only=True): - execute(kill_process, hosts=conf.HOSTS) + execute(kill_process, hosts=conf.HOSTS) + def job_pserver(jobdir, pids=None): ''' @@ -124,9 +126,8 @@ def start_pserver(jobdir, pargs): execute(start_pserver, jobdir, pargs, hosts=conf.HOSTS) -def job_trainer(jobdir, - train_args_dict, - pids=None): + +def job_trainer(jobdir, train_args_dict, pids=None): ''' start paddle trainer ''' @@ -171,9 +172,8 @@ def start_trainer(jobdir, args): train_args += " --trainer_id=" + str(i) execute(start_trainer, jobdir, train_args, hosts=conf.HOSTS[i]) -def job_all(job_package, - jobdir=None, - train_args_dict=None): + +def job_all(job_package, jobdir=None, train_args_dict=None): ''' param job_package param train_args_dict @@ -183,41 +183,52 @@ def job_all(job_package, jobdir = conf.ROOT_DIR + "/JOB" + timestamp job_prepare(jobdir, job_package) job_pserver(jobdir) - time.sleep(5) #wait until pservers completely start + time.sleep(5) #wait until pservers completely start job_trainer(jobdir, train_args_dict) job_clean() + def job_clean(): ''' if starting job failed from paddle internal, the framework always is launched successfully since these process are daemon processes. so this job_clean can alway clean job rubbish process with ctrl+c. ''' + def signal_handler(signal, frame): ''' SIGINT handler ''' + def kill_process(): - run("ps aux \ + run("ps aux \ | grep paddle_process_by_paddle \ | grep -v grep \ | awk '{print $2}' \ | xargs kill > /dev/null 2>&1") + with settings(warn_only=True): - execute(kill_process, hosts=conf.HOSTS) + execute(kill_process, hosts=conf.HOSTS) signal.signal(signal.SIGINT, signal_handler) signal.pause() + if __name__ == '__main__': - parser = argparse.ArgumentParser(prog="paddle.py", - description='simple tool for cluster training') - parser.add_argument('-j', '--job_workspace', - required=False, default=None, - help='job workspace') - parser.add_argument('-p', '--job_dispatch_package', - required=False, default=None, - help='job package for dispatching to all other nodes') + parser = argparse.ArgumentParser( + prog="paddle.py", description='simple tool for cluster training') + parser.add_argument( + '-j', + '--job_workspace', + required=False, + default=None, + help='job workspace') + parser.add_argument( + '-p', + '--job_dispatch_package', + required=False, + default=None, + help='job package for dispatching to all other nodes') args, train_args_list = parser.parse_known_args() train_args = refine_unknown_args(train_args_list) @@ -227,14 +238,10 @@ def kill_process(): #if assigned workspace, do not need to dispatch data, #so job_local_package should be None assert args.job_dispatch_package is None - job_all(None, - args.job_workspace, - train_args_dict) + job_all(None, args.job_workspace, train_args_dict) elif args.job_dispatch_package is not None: assert args.job_workspace is None assert os.path.isdir(args.job_dispatch_package) - job_all(args.job_dispatch_package, - None, - train_args_dict) + job_all(args.job_dispatch_package, None, train_args_dict) else: print "--job_workspace or --job_dispatch_package should be set" diff --git a/paddle/trainer/tests/__init__.py b/paddle/trainer/tests/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/paddle/trainer/tests/__init__.py +++ b/paddle/trainer/tests/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - diff --git a/paddle/trainer/tests/config_parser_test.py b/paddle/trainer/tests/config_parser_test.py index 5ca874cec7914..c5ec315d6b01b 100644 --- a/paddle/trainer/tests/config_parser_test.py +++ b/paddle/trainer/tests/config_parser_test.py @@ -17,6 +17,6 @@ if __name__ == '__main__': parse_config_and_serialize('trainer/tests/test_config.conf', '') parse_config_and_serialize( - 'trainer/tests/sample_trainer_config.conf', + 'trainer/tests/sample_trainer_config.conf', 'extension_module_name=paddle.trainer.config_parser_extension') parse_config_and_serialize('gserver/tests/pyDataProvider/trainer.conf', '') diff --git a/paddle/trainer/tests/gen_proto_data.py b/paddle/trainer/tests/gen_proto_data.py index c818a94bee7c2..a3dbc10c886e1 100644 --- a/paddle/trainer/tests/gen_proto_data.py +++ b/paddle/trainer/tests/gen_proto_data.py @@ -21,8 +21,7 @@ import pprint logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', -) + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) logger = logging.getLogger('paddle') logger.setLevel(logging.INFO) @@ -36,33 +35,32 @@ # [[-1,0], [0,0]] means previous token at column 0 and current token at # column 0 are combined as one feature. patterns = [ - [[-2,0]], - [[-1,0]], - [[0,0]], - [[1,0]], - [[2,0]], - - [[-1,0], [0,0]], - [[0,0], [1,0]], - - [[-2,1]], - [[-1,1]], - [[0,1]], - [[1,1]], - [[2,1]], - [[-2,1], [-1,1]], - [[-1,1], [0,1]], - [[0,1], [1,1]], - [[1,1], [2,1]], - - [[-2,1], [-1,1], [0,1]], - [[-1,1], [0,1], [1,1]], - [[0,1], [1,1], [2,1]], + [[-2, 0]], + [[-1, 0]], + [[0, 0]], + [[1, 0]], + [[2, 0]], + [[-1, 0], [0, 0]], + [[0, 0], [1, 0]], + [[-2, 1]], + [[-1, 1]], + [[0, 1]], + [[1, 1]], + [[2, 1]], + [[-2, 1], [-1, 1]], + [[-1, 1], [0, 1]], + [[0, 1], [1, 1]], + [[1, 1], [2, 1]], + [[-2, 1], [-1, 1], [0, 1]], + [[-1, 1], [0, 1], [1, 1]], + [[0, 1], [1, 1], [2, 1]], ] + def make_features(sequence): length = len(sequence) num_features = len(sequence[0]) + def get_features(pos): if pos < 0: return ['#B%s' % -pos] * num_features @@ -72,9 +70,10 @@ def get_features(pos): for i in xrange(length): for pattern in patterns: - fname = '/'.join([get_features(i+pos)[f] for pos, f in pattern]) + fname = '/'.join([get_features(i + pos)[f] for pos, f in pattern]) sequence[i].append(fname) + ''' Source file format: Each line is for one timestep. The features are separated by space. @@ -87,6 +86,8 @@ def get_features(pos): return a list of dict for each column ''' + + def create_dictionaries(filename, cutoff, oov_policy): def add_to_dict(sequence, dicts): num_features = len(dicts) @@ -118,7 +119,6 @@ def add_to_dict(sequence, dicts): features = line.split(' ') sequence.append(features) - for i in xrange(num_features): dct = dicts[i] n = 1 if oov_policy[i] == OOV_POLICY_USE else 0 @@ -161,12 +161,9 @@ def write_proto(file, message): if oov_policy[i] == OOV_POLICY_ERROR, all features in i-th column MUST exist in dicts[i]. 
''' -def gen_proto_file( - input_file, - dicts, - oov_policy, - output_file): + +def gen_proto_file(input_file, dicts, oov_policy, output_file): def write_sequence(out, sequence): num_features = len(dicts) is_beginning = True @@ -213,8 +210,8 @@ def write_sequence(out, sequence): if patterns: slot_def = header.slot_defs.add() slot_def.type = DataFormat.SlotDef.VECTOR_SPARSE_NON_VALUE - slot_def.dim = sum([len(dicts[i]) - for i in xrange(num_original_columns, len(dicts))]) + slot_def.dim = sum( + [len(dicts[i]) for i in xrange(num_original_columns, len(dicts))]) logger.info("feature_dim=%s" % slot_def.dim) for i in xrange(num_original_columns): @@ -242,30 +239,31 @@ def write_sequence(out, sequence): logger.info("num_sequences=%s" % num_sequences) + dict2 = { - 'B-ADJP': 0, - 'I-ADJP': 1, - 'B-ADVP': 2, - 'I-ADVP': 3, - 'B-CONJP': 4, - 'I-CONJP': 5, - 'B-INTJ': 6, - 'I-INTJ': 7, - 'B-LST': 8, - 'I-LST': 9, - 'B-NP': 10, - 'I-NP': 11, - 'B-PP': 12, - 'I-PP': 13, - 'B-PRT': 14, - 'I-PRT': 15, - 'B-SBAR': 16, - 'I-SBAR': 17, - 'B-UCP': 18, - 'I-UCP': 19, - 'B-VP': 20, - 'I-VP': 21, - 'O': 22 + 'B-ADJP': 0, + 'I-ADJP': 1, + 'B-ADVP': 2, + 'I-ADVP': 3, + 'B-CONJP': 4, + 'I-CONJP': 5, + 'B-INTJ': 6, + 'I-INTJ': 7, + 'B-LST': 8, + 'I-LST': 9, + 'B-NP': 10, + 'I-NP': 11, + 'B-PP': 12, + 'I-PP': 13, + 'B-PRT': 14, + 'I-PRT': 15, + 'B-SBAR': 16, + 'I-SBAR': 17, + 'B-UCP': 18, + 'I-UCP': 19, + 'B-VP': 20, + 'I-VP': 21, + 'O': 22 } if __name__ == '__main__': @@ -273,16 +271,9 @@ def write_sequence(out, sequence): cutoff += [3] * len(patterns) oov_policy = [OOV_POLICY_IGNORE, OOV_POLICY_ERROR, OOV_POLICY_ERROR] oov_policy += [OOV_POLICY_IGNORE] * len(patterns) - dicts = create_dictionaries( - 'trainer/tests/train.txt', cutoff, oov_policy) + dicts = create_dictionaries('trainer/tests/train.txt', cutoff, oov_policy) dicts[2] = dict2 - gen_proto_file( - 'trainer/tests/train.txt', - dicts, - oov_policy, - 'trainer/tests/train_proto.bin') - gen_proto_file( - 'trainer/tests/test.txt', - dicts, - oov_policy, - 'trainer/tests/test_proto.bin') + gen_proto_file('trainer/tests/train.txt', dicts, oov_policy, + 'trainer/tests/train_proto.bin') + gen_proto_file('trainer/tests/test.txt', dicts, oov_policy, + 'trainer/tests/test_proto.bin') diff --git a/paddle/trainer/tests/testPyDataWrapper.py b/paddle/trainer/tests/testPyDataWrapper.py index 49bd760f4e20e..4607bec24e1fe 100644 --- a/paddle/trainer/tests/testPyDataWrapper.py +++ b/paddle/trainer/tests/testPyDataWrapper.py @@ -21,7 +21,10 @@ import string -@provider(slots=[SparseNonValueSlot(10), DenseSlot(2), SparseValueSlot(10), StringSlot(1), IndexSlot(3)]) +@provider(slots=[ + SparseNonValueSlot(10), DenseSlot(2), SparseValueSlot(10), StringSlot(1), + IndexSlot(3) +]) def processNonSequenceData(obj, filename): with open(filename, "rb") as f: for line in f: @@ -50,6 +53,7 @@ def __values_mapper__(s): seq_count_randomer = lambda: random.randrange(1, SEQUENCE_LIMIT) str_count_randomer = lambda: random.randrange(1, STRING_LIMIT) + class IDRandomer(): # A random generator, return unique id def __init__(self): self.id_set = set() @@ -61,38 +65,57 @@ def __call__(self): return idx else: return self.__call__() + + # SparseValueSlot def sparse_value_creator(_): rand = IDRandomer() return [(rand(), val_randomer()) for _ in xrange(sparse_count_randomer())] + + sparse_value = map(sparse_value_creator, range(seq_count_randomer())) + # DenseSlot def dense_creator(_): return [val_randomer() for _ in xrange(SPARSE_ID_LIMIT)] + + dense = map(dense_creator, 
range(seq_count_randomer())) + # SparseNonValueSlot def sparse_creator(_): rand = IDRandomer() return [rand() for _ in xrange(sparse_count_randomer())] + + sparse_nonvalue = map(sparse_creator, range(seq_count_randomer())) # IndexSlot ids = [sparse_id_randomer() for _ in range(seq_count_randomer())] + # StringSlot -def random_str(size = 8, chars=string.ascii_letters + string.digits): +def random_str(size=8, chars=string.ascii_letters + string.digits): return ''.join(random.choice(chars) for _ in range(size)) + + strs = [random_str(str_count_randomer()) for _ in range(seq_count_randomer())] + def processSeqAndGenerateDataInit(obj, *args, **kwargs): obj.json_filename = kwargs.get("load_data_args", "test_data.json") -@provider(slots=[SparseValueSlot(SPARSE_ID_LIMIT), DenseSlot(SPARSE_ID_LIMIT), - SparseNonValueSlot(SPARSE_ID_LIMIT), IndexSlot(SPARSE_ID_LIMIT), - StringSlot(SPARSE_ID_LIMIT)], - use_seq=True, init_hook=processSeqAndGenerateDataInit) + +@provider( + slots=[ + SparseValueSlot(SPARSE_ID_LIMIT), DenseSlot(SPARSE_ID_LIMIT), + SparseNonValueSlot(SPARSE_ID_LIMIT), IndexSlot(SPARSE_ID_LIMIT), + StringSlot(SPARSE_ID_LIMIT) + ], + use_seq=True, + init_hook=processSeqAndGenerateDataInit) def processSeqAndGenerateData(obj, name): retv = [sparse_value, dense, sparse_nonvalue, ids, strs] # Write to protoseq. @@ -104,10 +127,15 @@ def processSeqAndGenerateData(obj, name): def processSubSeqAndGenerateDataInit(obj, *args, **kwargs): obj.json_filename = kwargs.get("load_data_args", "test_data.json") -@provider(slots=[SparseValueSlot(SPARSE_ID_LIMIT), DenseSlot(SPARSE_ID_LIMIT), - SparseNonValueSlot(SPARSE_ID_LIMIT), IndexSlot(SPARSE_ID_LIMIT), - StringSlot(SPARSE_ID_LIMIT)], - use_seq=True, init_hook=processSubSeqAndGenerateDataInit) + +@provider( + slots=[ + SparseValueSlot(SPARSE_ID_LIMIT), DenseSlot(SPARSE_ID_LIMIT), + SparseNonValueSlot(SPARSE_ID_LIMIT), IndexSlot(SPARSE_ID_LIMIT), + StringSlot(SPARSE_ID_LIMIT) + ], + use_seq=True, + init_hook=processSubSeqAndGenerateDataInit) def processSubSeqAndGenerateData(obj, name): retv_json = [sparse_value, dense, sparse_nonvalue, ids, strs] retv_wrapper = [[sparse_value], [dense], [sparse_nonvalue], [ids], [strs]] @@ -116,6 +144,7 @@ def processSubSeqAndGenerateData(obj, name): json.dump(retv_json, f) yield retv_wrapper + if __name__ == "__main__": pvd = processNonSequenceData("test.txt") print pvd.getNextBatch(100) diff --git a/paddle/utils/enable_virtualenv.py b/paddle/utils/enable_virtualenv.py index 99d822a4145cc..ccfaa7c147b2c 100644 --- a/paddle/utils/enable_virtualenv.py +++ b/paddle/utils/enable_virtualenv.py @@ -1,10 +1,12 @@ import os + def __activate_virtual_env__(): - __path__ = os.getenv('VIRTUAL_ENV') - if __path__ is None: - return - __script__ = os.path.join(__path__, 'bin', 'activate_this.py') - execfile(__script__, {'__file__': __script__}) + __path__ = os.getenv('VIRTUAL_ENV') + if __path__ is None: + return + __script__ = os.path.join(__path__, 'bin', 'activate_this.py') + execfile(__script__, {'__file__': __script__}) + __activate_virtual_env__() From a1ba3f442fd80382969ed2c434a66985be1e2c1f Mon Sep 17 00:00:00 2001 From: qijun Date: Sat, 12 Nov 2016 02:26:18 +0000 Subject: [PATCH 157/180] format python code in python directory --- python/paddle/__init__.py | 1 - python/paddle/trainer/PyDataProvider2.py | 35 +- .../paddle/trainer/PyDataProviderWrapper.py | 35 +- python/paddle/trainer/__init__.py | 1 - python/paddle/trainer/config_parser.py | 2008 ++++++++--------- .../paddle/trainer/config_parser_extension.py | 10 +- 
python/paddle/trainer/recurrent_units.py | 489 ++-- .../trainer_config_helpers/activations.py | 51 +- python/paddle/trainer_config_helpers/attrs.py | 37 +- .../trainer_config_helpers/data_sources.py | 55 +- .../default_decorators.py | 19 +- .../trainer_config_helpers/evaluators.py | 241 +- .../paddle/trainer_config_helpers/layers.py | 1433 +++++++----- python/paddle/trainer_config_helpers/math.py | 27 +- .../paddle/trainer_config_helpers/networks.py | 772 ++++--- .../trainer_config_helpers/optimizers.py | 51 +- .../paddle/trainer_config_helpers/poolings.py | 23 +- .../tests/configs/img_layers.py | 19 +- .../tests/configs/img_trans_layers.py | 20 +- .../tests/configs/last_first_seq.py | 17 +- .../tests/configs/layer_activations.py | 16 +- .../tests/configs/math_ops.py | 8 +- .../tests/configs/projections.py | 29 +- .../tests/configs/shared_fc.py | 27 +- .../tests/configs/shared_lstm.py | 28 +- .../tests/configs/simple_rnn_layers.py | 31 +- .../tests/configs/test_bi_grumemory.py | 5 +- .../tests/configs/test_bilinear_interp.py | 37 +- .../tests/configs/test_cost_layers.py | 49 +- .../configs/test_cost_layers_with_weight.py | 12 +- .../tests/configs/test_expand_layer.py | 14 +- .../tests/configs/test_fc.py | 12 +- .../tests/configs/test_grumemory_layer.py | 14 +- .../tests/configs/test_hsigmoid.py | 7 +- .../tests/configs/test_lstmemory_layer.py | 14 +- .../tests/configs/test_maxout.py | 68 +- .../tests/configs/test_ntm_layers.py | 35 +- .../tests/configs/test_print_layer.py | 5 +- .../tests/configs/test_rnn_group.py | 25 +- .../tests/configs/test_sequence_pooling.py | 21 +- .../tests/configs/test_split_datasource.py | 14 +- .../tests/configs/test_spp_layer.py | 16 +- .../tests/configs/unused_layers.py | 7 +- .../tests/configs/util_layers.py | 8 +- .../tests/layers_test_config.py | 65 +- python/paddle/trainer_config_helpers/utils.py | 4 +- python/paddle/utils/image_util.py | 76 +- python/paddle/utils/make_model_diagram.py | 17 +- python/paddle/utils/plotcurve.py | 40 +- python/paddle/utils/predefined_net.py | 240 +- python/paddle/utils/preprocess_img.py | 37 +- python/paddle/utils/preprocess_util.py | 65 +- python/paddle/utils/show_pb.py | 9 +- python/paddle/utils/torch2paddle.py | 25 +- 54 files changed, 3498 insertions(+), 2926 deletions(-) diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- diff --git a/python/paddle/trainer/PyDataProvider2.py b/python/paddle/trainer/PyDataProvider2.py index 53409b746d811..0c577ec657bc6 100644 --- a/python/paddle/trainer/PyDataProvider2.py +++ b/python/paddle/trainer/PyDataProvider2.py @@ -18,9 +18,8 @@ import functools import itertools -logging.basicConfig( - format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]" - " %(message)s") +logging.basicConfig(format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]" + " %(message)s") class SequenceType(object): @@ -132,8 +131,10 @@ def __init__(self, generator, input_order): def __call__(self, obj, filename): for item in self.generator(obj, filename): if isinstance(item, dict): - yield [item.get(input_name, None) for input_name in - self.input_order] + yield [ + item.get(input_name, None) + for input_name in self.input_order + ] else: yield item @@ -162,8 +163,8 @@ def __call__(self, obj, filename): yield items except AssertionError as e: self.logger.warning( - "Item (%s) is not fit the input type with error %s" - % (repr(item), repr(e))) + "Item (%s) is not fit the input type with error %s" % + (repr(item), repr(e))) if self.check_fail_continue: continue @@ -202,13 +203,17 @@ def loop_check(callback, item): callback(each) -def provider(input_types=None, should_shuffle=None, pool_size=-1, +def provider(input_types=None, + should_shuffle=None, + pool_size=-1, min_pool_size=-1, can_over_batch_size=True, calc_batch_size=None, cache=CacheType.NO_CACHE, - check=False, check_fail_continue=False, - init_hook=None, **kwargs): + check=False, + check_fail_continue=False, + init_hook=None, + **kwargs): """ Provider decorator. Use it to make a function into PyDataProvider2 object. In this function, user only need to get each sample for some train/test @@ -318,9 +323,9 @@ def __init__(self, file_list, **kwargs): "Could not recognize should_shuffle (%s), " "just use default value of should_shuffle." " Please set should_shuffle to bool value or " - "something in %s" % ( - repr(self.should_shuffle), - repr(true_table + false_table))) + "something in %s" % + (repr(self.should_shuffle), + repr(true_table + false_table))) self.should_shuffle = None self.pool_size = pool_size @@ -351,8 +356,7 @@ def __init__(self, file_list, **kwargs): self.generator = InputOrderWrapper(self.generator, self.input_order) if self.check: - self.generator = CheckWrapper(self.generator, - self.slots, + self.generator = CheckWrapper(self.generator, self.slots, check_fail_continue, self.logger) @@ -368,4 +372,3 @@ def deserialize_args(args): :return: """ return cPickle.loads(args) - diff --git a/python/paddle/trainer/PyDataProviderWrapper.py b/python/paddle/trainer/PyDataProviderWrapper.py index c4b907af54699..90b684a000017 100644 --- a/python/paddle/trainer/PyDataProviderWrapper.py +++ b/python/paddle/trainer/PyDataProviderWrapper.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ This module provide a wrapper(decorator) to wrap a data process method into a PyDataProvider. Some examples are shown `here `_. @@ -47,6 +46,7 @@ import io + class SlotType(object): # Just a hint for user. pass @@ -83,6 +83,7 @@ class SparseNonValueSlot(SlotType): - **SubSeq**: [[[int, int, ...], [int, ....], ...] , \ [[int, int, ...], [int, ....], ...] , ...] 
""" + def __init__(self, dim): """ :param dim: slot dimension @@ -294,8 +295,9 @@ def reset(self): fn = "%s_%d" % (self.profile_filename, self.profile_count) sortby = "cumulative" with open(fn, "w") as f: - pstats.Stats(self.profiler, stream=f).sort_stats( - sortby).print_stats() + pstats.Stats( + self.profiler, + stream=f).sort_stats(sortby).print_stats() self.logger.info("saving profile to file %s" % fn) self.profile_count += 1 self.logger.info("resetting profile") @@ -453,9 +455,10 @@ def writeDataStream(dat, data_callback): seq_stream.flush() subseq_stream.flush() - return "".join([self.int_packer.pack(current_batch_size), - data_bytes.getvalue(), - seq_bytes.getvalue(), subseq_bytes.getvalue()]) + return "".join([ + self.int_packer.pack(current_batch_size), data_bytes.getvalue(), + seq_bytes.getvalue(), subseq_bytes.getvalue() + ]) finally: data_stream.close() @@ -516,7 +519,7 @@ def __prepareData(self, batch_size, ret_list): self.data_pool[idx]) idx -= 1 - ret_list += self.data_pool[self.data_pool_idx: idx + 1] + ret_list += self.data_pool[self.data_pool_idx:idx + 1] # for speed reason, just shift left index, not delete data actually. self.data_pool_idx = idx + 1 @@ -537,8 +540,8 @@ def fillPool(self): if self.max_pool_size == 0: for i in xrange(min(self.file_count, len(self.generators))): self.data_pool += list(self.generators[i]) - self.generators = self.generators[ - min(self.file_count, len(self.generators)):] + self.generators = self.generators[min(self.file_count, + len(self.generators)):] self.max_pool_size = len(self.data_pool) else: while len(self.data_pool) < self.max_pool_size and len( @@ -562,9 +565,15 @@ def default_init_hook(cls, *args, **kwargs): del cls, args, kwargs -def provider(slots=None, use_seq=False, should_shuffle=True, pool_size=1, - can_over_batch_size=True, calc_batch_size=lambda data: 1, - debug=False, init_hook=default_init_hook, profile_filename=None): +def provider(slots=None, + use_seq=False, + should_shuffle=True, + pool_size=1, + can_over_batch_size=True, + calc_batch_size=lambda data: 1, + debug=False, + init_hook=default_init_hook, + profile_filename=None): """ The decorator for PyDataProvider. User should use this to create Provider class. User should only concern how to read sample from file. @@ -663,7 +672,7 @@ class Cls(GeneralPyDataProvider): def __init__(self, *file_list, **kwargs): logging.basicConfig( format="[%(levelname)s %(asctime)s %(filename)s:%(lineno)s]" - " %(message)s") + " %(message)s") self.logger = logging.getLogger("") if debug: diff --git a/python/paddle/trainer/__init__.py b/python/paddle/trainer/__init__.py index 7f9e87eee6037..c90af2ee000d4 100644 --- a/python/paddle/trainer/__init__.py +++ b/python/paddle/trainer/__init__.py @@ -11,4 +11,3 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index eec978e1faf48..881f0b821491b 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -13,7 +13,6 @@ # limitations under the License. 
from __future__ import print_function - ''' The following functions are available in the config file: @@ -101,50 +100,45 @@ raise logging.basicConfig( - format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', -) + format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s', ) logger = logging.getLogger('paddle') logger.setLevel(logging.INFO) __real_print__ = print -print=logger.info +print = logger.info # from layer type name to layer class g_layer_type_map = {} + # Initialize global variables. We use this function so that we can # call parse_config() multiple times def init_config_environment( - g_default_momentum = None, - g_default_decay_rate = None, - g_default_initial_mean = 0., - g_default_initial_std = 0.01, - g_default_num_batches_regularization = None, - g_default_initial_strategy = 0, - g_default_initial_smart = False, - g_default_gradient_clipping_threshold = None, - g_default_device = None, - g_default_update_hooks = None, - g_default_compact_func = None, - - g_config = TrainerConfig(), - g_layer_map = {}, - g_parameter_map = {}, - - g_extended_config_funcs = {}, + g_default_momentum=None, + g_default_decay_rate=None, + g_default_initial_mean=0., + g_default_initial_std=0.01, + g_default_num_batches_regularization=None, + g_default_initial_strategy=0, + g_default_initial_smart=False, + g_default_gradient_clipping_threshold=None, + g_default_device=None, + g_default_update_hooks=None, + g_default_compact_func=None, + g_config=TrainerConfig(), + g_layer_map={}, + g_parameter_map={}, + g_extended_config_funcs={}, # store command args of paddle_trainer - g_command_config_args = {}, + g_command_config_args={}, # Used for PyDataProvider to avoid duplicate module name - g_py_module_name_list = [], - - g_current_submodel = None, - g_root_submodel = None, - g_submodel_map = {}, - g_submodel_stack = [], - - g_add_submodel_suffix = False, - ): + g_py_module_name_list=[], + g_current_submodel=None, + g_root_submodel=None, + g_submodel_map={}, + g_submodel_stack=[], + g_add_submodel_suffix=False, ): for k, v in locals().iteritems(): globals()[k] = copy.deepcopy(v) @@ -161,43 +155,54 @@ def config_assert(b, msg): if not b: logger.fatal(msg) + g_config_funcs = {} + # decorator for indicating a function which can be used in config file def config_func(func): g_config_funcs[func.func_name] = func return func + # decorator for indicating a class which can be used in config file def config_class(cls): g_config_funcs[cls.__name__] = cls return cls + # decorator for indicating a class for a layer type def config_layer(layer_type): def wrap(cls): g_config_funcs[cls.__name__] = cls g_layer_type_map[layer_type] = cls return cls + return wrap + def gen_parameter_name(layer_name, input_index): return '_%s.w%d' % (layer_name, input_index) + def gen_bias_parameter_name(layer_name): return '_%s.wbias' % layer_name + def default(x, default_value): return default_value if x is None else x + class Cfg(object): def add_keys(self, locals): for k, v in locals.iteritems(): if not k.startswith('_'): self.__setattr__(k, v) + # functions available in config file + # Define the name of the input layers of the NeuralNetwork. # The type of these layers must be "data". 
# These layers will be provided with the DataBatch obtained @@ -216,6 +221,7 @@ def Inputs(*args): if g_current_submodel is g_root_submodel: g_config.model_config.input_layer_names.append(name) + @config_func def HasInputsSet(): return len(g_current_submodel.input_layer_names) != 0 @@ -244,7 +250,7 @@ def SubModelBegin(name): global g_current_submodel, g_root_submodel, g_submodel_stack g_submodel_stack.append(g_current_submodel) - name = MakeLayerNameInParentSubmodel(name) #rename in nested submodel + name = MakeLayerNameInParentSubmodel(name) #rename in nested submodel config_assert(name not in g_submodel_map, 'Duplicated submodel name: %s' % name) @@ -254,36 +260,42 @@ def SubModelBegin(name): g_submodel_map[name] = sub_model g_current_submodel = sub_model + @config_func -def SubModelEnd(name = None): +def SubModelEnd(name=None): global g_current_submodel, g_root_submodel, g_submodel_stack - config_assert(g_current_submodel is not g_root_submodel, "submodel not begin") + config_assert(g_current_submodel is not g_root_submodel, + "submodel not begin") if name is not None: - config_assert(g_current_submodel.name == MakeLayerNameInParentSubmodel(name), - "submodel name error") + config_assert( + g_current_submodel.name == MakeLayerNameInParentSubmodel(name), + "submodel name error") g_current_submodel = g_submodel_stack.pop() + def MakeLayerNameInParentSubmodel(name): suffix = "" if len(g_submodel_stack) > 1: suffix = "@" + g_submodel_stack[-1].name return name + suffix + def GetLayerBaseName(name): return name.split('@')[0] -def MakeLayerNameInSubmodel(name, submodel_name = None): + +def MakeLayerNameInSubmodel(name, submodel_name=None): global g_current_submodel global g_add_submodel_suffix - if (submodel_name is None - and not g_add_submodel_suffix - and not g_current_submodel.is_recurrent_layer_group): + if (submodel_name is None and not g_add_submodel_suffix and + not g_current_submodel.is_recurrent_layer_group): return name if submodel_name is None: submodel_name = g_current_submodel.name return name + "@" + submodel_name + # Define a recurrent layer group begin with RecurrentLayerGroupBegin # and end with RecurrentLayerGroupEnd. 
# A recurrent layer group forward/backward one frame after previous frame @@ -332,8 +344,10 @@ def RecurrentLayerGroupWithoutOutLinksBegin(name, if in_links_count == 0: in_links_has_subseq = has_subseq else: - config_assert(in_links_has_subseq == has_subseq, - "The sequence type of in_links should be the same in RecurrentLayerGroup") + config_assert( + in_links_has_subseq == has_subseq, + "The sequence type of in_links should be the same in RecurrentLayerGroup" + ) in_links_count += 1 layer_name = MakeLayerNameInParentSubmodel(name) layer = g_layer_map[layer_name] @@ -347,6 +361,7 @@ def RecurrentLayerGroupWithoutOutLinksBegin(name, pair.link_name = MakeLayerNameInSubmodel(name) pair.has_subseq = has_subseq + @config_func def RecurrentLayerGroupSetOutLink(link): if isinstance(link, basestring): @@ -363,8 +378,7 @@ def RecurrentLayerGroupSetOutLink(link): def RecurrentLayerGroupSetGenerator(generator=None): - generator.eos_layer_name = MakeLayerNameInSubmodel( - generator.eos_layer_name) + generator.eos_layer_name = MakeLayerNameInSubmodel(generator.eos_layer_name) g_current_submodel.generator.CopyFrom(generator) @@ -375,21 +389,18 @@ def RecurrentLayerGroupBegin(name, generator=None, target_inlinkname="", seq_reversed=False): - RecurrentLayerGroupWithoutOutLinksBegin(name, - in_links, - seq_reversed, + RecurrentLayerGroupWithoutOutLinksBegin(name, in_links, seq_reversed, target_inlinkname) for link in out_links: RecurrentLayerGroupSetOutLink(link) - if generator is not None: RecurrentLayerGroupSetGenerator(generator) - config_assert(len(in_links) == 0, - "no in_links should be passed to generator") - config_assert(len(out_links) >= 1, - "one or more than one out_links should be passed to generator") - + config_assert( + len(in_links) == 0, "no in_links should be passed to generator") + config_assert( + len(out_links) >= 1, + "one or more than one out_links should be passed to generator") @config_func @@ -397,9 +408,10 @@ def RecurrentLayerGroupEnd(name): global g_current_submodel config_assert(g_current_submodel.is_recurrent_layer_group, "RecurrentLayerGroup not begin") - for pair in g_current_submodel.memories: #check exist + for pair in g_current_submodel.memories: #check exist layer = g_layer_map[pair.layer_name] - config_assert(layer is not None, "memory declare wrong name:%s" % pair.layer_name) + config_assert(layer is not None, "memory declare wrong name:%s" % + pair.layer_name) memory_link = g_layer_map[pair.link_name] config_assert(layer.size == memory_link.size, "memory declare wrong size:%d" % memory_link.size) @@ -418,12 +430,14 @@ def RecurrentLayerGroupEnd(name): else: GatherAgentLayer(name=agent_name, size=layer.size) + # Define the model type # currently, the paddle supports "nn", "recurrent_nn", "recursive_nn" and "multi_nn" @config_func def model_type(name): g_config.model_config.type = name + @config_class class Bias(Cfg): def __init__( @@ -441,10 +455,10 @@ def __init__( sparse_remote_update=None, gradient_clipping_threshold=None, is_static=None, - is_shared=None, - ): + is_shared=None, ): self.add_keys(locals()) + # Define one input for a layer @config_class class Input(Cfg): @@ -477,19 +491,20 @@ def __init__( is_static=None, is_shared=None, update_hooks=None, - input_layer_argument=None, - ): + input_layer_argument=None, ): self.add_keys(locals()) self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name) + # Define a projection for iexed layer @config_class class Projection(Input): - type = None # subclass should set it correctly + type = None # subclass 
should set it correctly + def __init__( self, input_layer_name, - size = 0, # projection output size + size=0, # projection output size parameter_name=None, learning_rate=None, momentum=None, @@ -509,8 +524,7 @@ def __init__( is_static=None, is_shared=None, update_hooks=None, - input_layer_argument=None, - ): + input_layer_argument=None, ): self.add_keys(locals()) self.input_layer_name = MakeLayerNameInSubmodel(input_layer_name) @@ -524,8 +538,10 @@ def __init__( # to indicate using the size from Layer config def calc_output_size(self, input_layer_config): return self.size + def calc_parameter_size(self, input_size, output_size): raise NotimplementedError + def calc_parameter_dims(self, input_size, output_size): raise NotimplementedError @@ -536,31 +552,32 @@ class IdentityProjection(Projection): def calc_output_size(self, input_layer_config): return input_layer_config.size + def calc_parameter_size(self, input_size, output_size): return 0 + def calc_parameter_dims(self, input_size, output_size): return [] + # Like IdentityProjection, but layer size may smaller than input size, # the projection select dimesions [offset, offset+layer_size) from input @config_class class IdentityOffsetProjection(Projection): type = 'identity_offset' - def __init__( - self, - input_layer_name, - offset, - **xargs): - super(IdentityOffsetProjection, self).__init__( - input_layer_name, **xargs) + def __init__(self, input_layer_name, offset, **xargs): + super(IdentityOffsetProjection, self).__init__(input_layer_name, + **xargs) self.proj_conf.offset = offset def calc_parameter_size(self, input_size, output_size): return 0 + def calc_parameter_dims(self, input_size, output_size): return [] + # DotMulProjection performs element-wise multiplication with weight @config_class class DotMulProjection(Projection): @@ -568,49 +585,53 @@ class DotMulProjection(Projection): def calc_output_size(self, input_layer_config): return input_layer_config.size + def calc_parameter_size(self, input_size, output_size): return output_size + def calc_parameter_dims(self, input_size, output_size): return [1, output_size] + @config_class class TableProjection(Projection): type = 'table' def calc_parameter_size(self, input_size, output_size): return input_size * output_size + def calc_parameter_dims(self, input_size, output_size): return [input_size, output_size] + @config_class class FullMatrixProjection(Projection): type = 'fc' def calc_parameter_size(self, input_size, output_size): return input_size * output_size + def calc_parameter_dims(self, input_size, output_size): return [input_size, output_size] + @config_class class TransposedFullMatrixProjection(Projection): type = 'trans_fc' def calc_parameter_size(self, input_size, output_size): return input_size * output_size + def calc_parameter_dims(self, input_size, output_size): return [output_size, input_size] + @config_class class ContextProjection(Projection): type = 'context' - def __init__( - self, - input_layer_name, - context_start, - context_length, - trainable_padding, - **xargs): + def __init__(self, input_layer_name, context_start, context_length, + trainable_padding, **xargs): super(ContextProjection, self).__init__(input_layer_name, **xargs) self.proj_conf.context_start = context_start self.proj_conf.context_length = context_length @@ -638,23 +659,21 @@ def calc_parameter_dims(self, input_size, output_size): class ConvProjection(Projection): type = 'conv' - def __init__( - self, - input_layer_name, - num_filters=None, - conv_conf=None, - **xargs): + def __init__(self, + 
input_layer_name, + num_filters=None, + conv_conf=None, + **xargs): super(ConvProjection, self).__init__(input_layer_name, **xargs) if num_filters is not None: self.proj_conf.num_filters = num_filters - parse_conv(conv_conf, - input_layer_name, - self.proj_conf.conv_conf, + parse_conv(conv_conf, input_layer_name, self.proj_conf.conv_conf, num_filters) # TODO: support rectangle input - self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x ** 2) * num_filters + self.proj_conf.output_size = (self.proj_conf.conv_conf.output_x** + 2) * num_filters def calc_output_size(self, input_layer_config): return self.proj_conf.output_size @@ -672,14 +691,15 @@ def calc_bias_size(self): def calc_parameter_dims(self, input_size, output_size): return None + # Define a operator for mixed layer @config_class class Operator(Cfg): - type = None # subclass should set it correctly + type = None # subclass should set it correctly + def __init__( self, - input_layer_names, - ): + input_layer_names, ): self.add_keys(locals()) self.operator_conf = OperatorConfig() self.operator_conf.type = self.type @@ -690,16 +710,13 @@ def check_dims(self): def calc_output_size(self, input_sizes): return 0 + @config_class class DotMulOperator(Operator): type = 'dot_mul' - def __init__( - self, - input_layer_names, - scale=None, - **xargs): - super(DotMulOperator, self).__init__( - input_layer_names, **xargs) + + def __init__(self, input_layer_names, scale=None, **xargs): + super(DotMulOperator, self).__init__(input_layer_names, **xargs) if scale is not None: self.operator_conf.dotmul_scale = scale @@ -715,26 +732,24 @@ def calc_output_size(self, input_sizes): return input_sizes[0] - @config_class class ConvOperator(Operator): type = 'conv' - def __init__( - self, - input_layer_names, - num_filters=None, - conv_conf=None, - **xargs): - super(ConvOperator, self).__init__( - input_layer_names, **xargs) + + def __init__(self, + input_layer_names, + num_filters=None, + conv_conf=None, + **xargs): + super(ConvOperator, self).__init__(input_layer_names, **xargs) if num_filters is not None: self.operator_conf.num_filters = num_filters parse_conv(conv_conf, MakeLayerNameInSubmodel(input_layer_names[0]), - self.operator_conf.conv_conf, - num_filters) - self.operator_conf.output_size = (self.operator_conf.conv_conf.output_x ** 2) * num_filters + self.operator_conf.conv_conf, num_filters) + self.operator_conf.output_size = (self.operator_conf.conv_conf.output_x + **2) * num_filters config_assert(len(input_layer_names) == 2, "Conv is binary operator") @@ -745,119 +760,106 @@ def calc_output_size(self, input_sizes): # please refer to the comments in proto/ModelConfig.proto @config_class class Conv(Cfg): - def __init__( - self, - filter_size, - channels, - padding = None, - stride = None, - groups = None, - filter_channels = None, - output_x = None, - img_size = None, - caffe_mode = True, - filter_size_y = None, - padding_y = None, - stride_y = None): + def __init__(self, + filter_size, + channels, + padding=None, + stride=None, + groups=None, + filter_channels=None, + output_x=None, + img_size=None, + caffe_mode=True, + filter_size_y=None, + padding_y=None, + stride_y=None): self.add_keys(locals()) if filter_size_y is None: - self.filter_size_y = filter_size + self.filter_size_y = filter_size if padding_y is None: - self.padding_y = padding + self.padding_y = padding if stride_y is None: - self.stride_y = stride + self.stride_y = stride if output_x is not None: - config_assert(output_x <= 0) + config_assert(output_x <= 0) + # please 
refer to the comments in proto/ModelConfig.proto @config_class class BilinearInterp(Cfg): - def __init__( - self, - out_size_x = None, - out_size_y = None, - num_channels = None): + def __init__(self, out_size_x=None, out_size_y=None, num_channels=None): self.add_keys(locals()) + # please refer to the comments in proto/ModelConfig.proto @config_class class Pool(Cfg): - def __init__( - self, - pool_type, - channels, - size_x, - size_y = None, - img_width = None, - start = None, - stride = None, - stride_y = None, - padding = None, - padding_y = None): + def __init__(self, + pool_type, + channels, + size_x, + size_y=None, + img_width=None, + start=None, + stride=None, + stride_y=None, + padding=None, + padding_y=None): self.add_keys(locals()) - + + # please refer to the comments in proto/ModelConfig.proto @config_class class SpatialPyramidPool(Cfg): - def __init__( - self, - pool_type, - pyramid_height, - channels, - img_width = None): + def __init__(self, pool_type, pyramid_height, channels, img_width=None): self.add_keys(locals()) + # please refer to the comments in proto/ModelConfig.proto @config_class class Norm(Cfg): - def __init__( - self, - norm_type, - channels, - size, - scale, - pow, - output_x = None, - img_size = None, - blocked = None): + def __init__(self, + norm_type, + channels, + size, + scale, + pow, + output_x=None, + img_size=None, + blocked=None): self.add_keys(locals()) + # please refer to the comments in proto/ModelConfig.proto @config_class class Image(Cfg): - def __init__( - self, - channels, - img_size = None): + def __init__(self, channels, img_size=None): self.add_keys(locals()) + @config_class class BlockExpand(Cfg): - def __init__( - self, - channels, - padding_x = 0, - padding_y = 0, - stride_x = 0, - stride_y = 0, - block_x = 0, - block_y = 0, - img_size_x = 0, - img_size_y = 0, - output_x = 0, - output_y = 0): + def __init__(self, + channels, + padding_x=0, + padding_y=0, + stride_x=0, + stride_y=0, + block_x=0, + block_y=0, + img_size_x=0, + img_size_y=0, + output_x=0, + output_y=0): self.add_keys(locals()) + @config_class class MaxOut(Cfg): - def __init__( - self, - channels, - groups, - img_size_x = 0, - img_size_y = 0): + def __init__(self, channels, groups, img_size_x=0, img_size_y=0): self.add_keys(locals()) + def DataBase(async_load_data=False, constant_slots=None, data_ratio=1, @@ -871,23 +873,23 @@ def DataBase(async_load_data=False, if constant_slots: data_config.constant_slots.extend(constant_slots) - data_config.data_ratio=data_ratio - data_config.is_main_data=is_main_data + data_config.data_ratio = data_ratio + data_config.is_main_data = is_main_data - usage_ratio=default(usage_ratio, settings_deprecated["usage_ratio"]) + usage_ratio = default(usage_ratio, settings_deprecated["usage_ratio"]) config_assert(usage_ratio >= 0 and usage_ratio <= 1, "The range of usage_ratio is [0, 1]") data_config.usage_ratio = usage_ratio return data_config + @config_func -def SimpleData( - files=None, - feat_dim=None, - context_len=None, - buffer_capacity=None, - **xargs): +def SimpleData(files=None, + feat_dim=None, + context_len=None, + buffer_capacity=None, + **xargs): data_config = DataBase(**xargs) data_config.type = 'simple' data_config.files = files @@ -898,31 +900,36 @@ def SimpleData( data_config.buffer_capacity = buffer_capacity return data_config + @config_func -def PyData( - files=None, - type=None, - file_group_queue_capacity=None, - load_data_module=None, - load_data_object=None, - load_data_args="", - load_file_count=None, - constant_slots=None, - 
load_thread_num=None, - **xargs): +def PyData(files=None, + type=None, + file_group_queue_capacity=None, + load_data_module=None, + load_data_object=None, + load_data_args="", + load_file_count=None, + constant_slots=None, + load_thread_num=None, + **xargs): data_config = DataBase(**xargs) data_config.type = 'py' if load_data_module in g_py_module_name_list: + def get_path(module): m = __import__(load_data_module) return os.path.split(os.path.realpath(m.__file__))[0] + # python C-api is not thread safe, one module can only be import once, # so here we nedd to copy the module with different names if it has to be # imported several times. - module_new_name = "%s_copy_%d" % (load_data_module, len(g_py_module_name_list)) + module_new_name = "%s_copy_%d" % (load_data_module, + len(g_py_module_name_list)) g_py_module_name_list.append(module_new_name) - module_path = "%s/%s.py" % (get_path(load_data_module), load_data_module) - new_module_path = "%s/%s.py" % (get_path(load_data_module), module_new_name) + module_path = "%s/%s.py" % (get_path(load_data_module), + load_data_module) + new_module_path = "%s/%s.py" % (get_path(load_data_module), + module_new_name) if os.path.isfile(module_path) == False: raise Exception("File %s is not exist." % module_path) shutil.copy2(module_path, new_module_path) @@ -947,15 +954,15 @@ def get_path(module): data_config.constant_slots.extend(constant_slots) return data_config + @config_func -def ProtoData( - files=None, - type=None, - file_group_queue_capacity=None, - load_file_count=None, - constant_slots=None, - load_thread_num=None, - **xargs): +def ProtoData(files=None, + type=None, + file_group_queue_capacity=None, + load_file_count=None, + constant_slots=None, + load_thread_num=None, + **xargs): data_config = DataBase(**xargs) if type is None: data_config.type = 'proto' @@ -976,25 +983,24 @@ def ProtoData( data_config.constant_slots.extend(constant_slots) return data_config + #real data for training is actually provided by "sub_data" data providers. @config_func -def MultiData( - sub_data=[] - ): +def MultiData(sub_data=[]): data_config = DataConfig() data_config.type = 'multi' data_config.sub_data_configs.extend(sub_data) return data_config + @config_func -def Data( - type, - files=None, - feat_dim=None, - slot_dims=None, - context_len=None, - buffer_capacity=None, - **xargs): +def Data(type, + files=None, + feat_dim=None, + slot_dims=None, + context_len=None, + buffer_capacity=None, + **xargs): data_config = DataBase(**xargs) data_config.type = type @@ -1030,15 +1036,19 @@ def TestData(data_config, async_load_data=None): " Data definition") g_config.test_data_config.async_load_data = async_load_data + def parse_bilinear(bilinear, input_layer_name, bilinear_conf): - bilinear_conf.out_size_x = bilinear.out_size_x; - bilinear_conf.out_size_y = bilinear.out_size_y; - bilinear_conf.num_channels = bilinear.num_channels; + bilinear_conf.out_size_x = bilinear.out_size_x + bilinear_conf.out_size_y = bilinear.out_size_y + bilinear_conf.num_channels = bilinear.num_channels + ''' caffe_mode: compute the output size using floor instead of ceil, which is consistent of caffe and CuDNN's convention. ''' + + def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode): output = (2 * padding + img_size - filter_size) / float(stride) if caffe_mode: @@ -1046,81 +1056,89 @@ def cnn_output_size(img_size, filter_size, padding, stride, caffe_mode): else: return 1 + int(math.ceil(output)) + ''' calcualte image_size based on output_size for convolution. 
It is the reverse function of cnn_output_size ''' + + def cnn_image_size(output_size, filter_size, padding, stride, caffe_mode): if caffe_mode: img_size = (output_size - 1) * stride + filter_size - 2 * padding else: - img_size = (output_size - 2) * stride + filter_size - 2 * padding + 1 + img_size = (output_size - 2) * stride + filter_size - 2 * padding + 1 return img_size + def parse_pool(pool, input_layer_name, pool_conf): pool_conf.pool_type = pool.pool_type - config_assert(pool.pool_type in ['max-projection', 'avg-projection', - 'cudnn-max-pool', 'cudnn-avg-pool'], - "pool-type %s is not in " + config_assert(pool.pool_type in [ + 'max-projection', 'avg-projection', 'cudnn-max-pool', 'cudnn-avg-pool' + ], "pool-type %s is not in " "['max-projection', 'avg-projection', " - "'cudnn-max-pool', 'cudnn-avg-pool']" - % pool.pool_type) + "'cudnn-max-pool', 'cudnn-avg-pool']" % pool.pool_type) pool_conf.channels = pool.channels pool_conf.size_x = pool.size_x pool_conf.stride = pool.stride pool_conf.size_y = default(pool.size_y, pool_conf.size_x) - pool_conf.stride_y = default(pool.stride_y, pool_conf.stride); + pool_conf.stride_y = default(pool.stride_y, pool_conf.stride) img_pixels = g_layer_map[input_layer_name].size / pool.channels # the img_width may be removed, # and it can be calculated automatically later. - pool_conf.img_size = default(pool.img_width, int(img_pixels ** 0.5)) + pool_conf.img_size = default(pool.img_width, int(img_pixels**0.5)) pool_conf.img_size_y = img_pixels / pool_conf.img_size config_assert(pool_conf.img_size * pool_conf.img_size_y == img_pixels, - "Incorrect input image size %d for input image pixels %d" - % (pool_conf.img_size, img_pixels)) + "Incorrect input image size %d for input image pixels %d" % + (pool_conf.img_size, img_pixels)) config_assert(not pool.start, "start is deprecated in pooling.") if pool.padding is not None: pool_conf.padding = pool.padding pool_conf.padding_y = default(pool.padding_y, pool_conf.padding) - pool_conf.output_x = cnn_output_size(pool_conf.img_size, pool_conf.size_x, - pool_conf.padding, pool_conf.stride, False) - pool_conf.output_y = cnn_output_size(pool_conf.img_size_y, pool_conf.size_y, - pool_conf.padding_y, pool_conf.stride_y, False) + pool_conf.output_x = cnn_output_size( + pool_conf.img_size, pool_conf.size_x, pool_conf.padding, + pool_conf.stride, False) + pool_conf.output_y = cnn_output_size( + pool_conf.img_size_y, pool_conf.size_y, pool_conf.padding_y, + pool_conf.stride_y, False) + def parse_spp(spp, input_layer_name, spp_conf): spp_conf.pool_type = spp.pool_type config_assert(spp.pool_type in ['max-projection', 'avg-projection'], - "pool-type %s is not in " "['max-projection', 'avg-projection']" - % spp.pool_type) + "pool-type %s is not in " + "['max-projection', 'avg-projection']" % spp.pool_type) spp_conf.pyramid_height = spp.pyramid_height spp_conf.channels = spp.channels img_pixels = g_layer_map[input_layer_name].size / spp_conf.channels - spp_conf.img_size = default(spp.img_width, int(img_pixels ** 0.5)) + spp_conf.img_size = default(spp.img_width, int(img_pixels**0.5)) spp_conf.img_size_y = img_pixels / spp_conf.img_size config_assert(spp_conf.img_size * spp_conf.img_size_y == img_pixels, - "Incorrect input image size %d for input image pixels %d" - % (spp_conf.img_size, img_pixels)) + "Incorrect input image size %d for input image pixels %d" % + (spp_conf.img_size, img_pixels)) + def parse_image(image, input_layer_name, image_conf): image_conf.channels = image.channels image_pixels = 
g_layer_map[input_layer_name].size / image_conf.channels - image_conf.img_size = int(image_pixels ** 0.5) - config_assert((image_conf.img_size ** 2) == image_pixels, - "Incorrect input image size %d for input image pixels %d" - % (image_conf.img_size, image_pixels)) + image_conf.img_size = int(image_pixels**0.5) + config_assert((image_conf.img_size**2) == image_pixels, + "Incorrect input image size %d for input image pixels %d" % + (image_conf.img_size, image_pixels)) + def parse_norm(norm, input_layer_name, norm_conf): norm_conf.norm_type = norm.norm_type config_assert(norm.norm_type in ['rnorm', 'cmrnorm-projection'], - "norm-type %s is not in [rnorm, 'cmrnorm-projection']" - % norm.norm_type) + "norm-type %s is not in [rnorm, 'cmrnorm-projection']" % + norm.norm_type) norm_conf.channels = norm.channels norm_conf.size = norm.size norm_conf.scale = norm.scale @@ -1128,20 +1146,23 @@ def parse_norm(norm, input_layer_name, norm_conf): norm_conf.blocked = norm.blocked img_pixels = g_layer_map[input_layer_name].size / norm.channels - norm_conf.img_size = int(img_pixels ** 0.5) - config_assert((norm_conf.img_size ** 2) == img_pixels, - "Incorrect input image size %d for input image pixels %d" - % (norm_conf.img_size, img_pixels)) + norm_conf.img_size = int(img_pixels**0.5) + config_assert((norm_conf.img_size**2) == img_pixels, + "Incorrect input image size %d for input image pixels %d" % + (norm_conf.img_size, img_pixels)) norm_conf.output_x = norm_conf.img_size if norm.norm_type in ['cmrnorm-projection']: norm_conf.scale /= norm.size else: - norm_conf.scale /= norm.size ** 2 + norm_conf.scale /= norm.size**2 + ''' caffe_mode: compute the output size using floor instead of ceil, which is consistent of caffe and CuDNN's convention. ''' + + def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False): conv_conf.filter_size = conv.filter_size conv_conf.filter_size_y = conv.filter_size_y @@ -1152,36 +1173,37 @@ def parse_conv(conv, input_layer_name, conv_conf, num_filters, trans=False): conv_conf.stride_y = conv.stride_y conv_conf.groups = conv.groups conv_conf.caffe_mode = conv.caffe_mode - + if not trans: conv_conf.filter_channels = conv.channels / conv.groups img_pixels = g_layer_map[input_layer_name].size / conv.channels - print('channels=%d size=%d'%(conv.channels, - g_layer_map[input_layer_name].size)) - conv_conf.img_size = int(img_pixels ** 0.5) - config_assert((conv_conf.img_size ** 2) == img_pixels, - ("Input layer %s: Incorrect input image size %d for input " - + "image pixels %d") - % (input_layer_name, conv_conf.img_size, img_pixels)) - + print('channels=%d size=%d' % (conv.channels, + g_layer_map[input_layer_name].size)) + conv_conf.img_size = int(img_pixels**0.5) + config_assert((conv_conf.img_size**2) == img_pixels, ( + "Input layer %s: Incorrect input image size %d for input " + + "image pixels %d") % + (input_layer_name, conv_conf.img_size, img_pixels)) + conv_conf.output_x = cnn_output_size( - conv_conf.img_size, conv_conf.filter_size, - conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) + conv_conf.img_size, conv_conf.filter_size, conv_conf.padding, + conv_conf.stride, conv_conf.caffe_mode) else: conv_conf.filter_channels = num_filters / conv.groups - + outputSize = g_layer_map[input_layer_name].size / conv.channels - print('channels=%d size=%d'%(conv.channels, - g_layer_map[input_layer_name].size)) - conv_conf.output_x = int(outputSize ** 0.5) - config_assert((conv_conf.output_x ** 2) == outputSize, - ("Input layer %s: Incorrect input image size %d for 
input " - + "image pixels %d") - % (input_layer_name, conv_conf.output_x, outputSize)) + print('channels=%d size=%d' % (conv.channels, + g_layer_map[input_layer_name].size)) + conv_conf.output_x = int(outputSize**0.5) + config_assert((conv_conf.output_x**2) == outputSize, ( + "Input layer %s: Incorrect input image size %d for input " + + "image pixels %d") % + (input_layer_name, conv_conf.output_x, outputSize)) conv_conf.img_size = cnn_image_size( - conv_conf.output_x, conv_conf.filter_size, - conv_conf.padding, conv_conf.stride, conv_conf.caffe_mode) + conv_conf.output_x, conv_conf.filter_size, conv_conf.padding, + conv_conf.stride, conv_conf.caffe_mode) + def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand_conf.channels = block_expand.channels @@ -1207,27 +1229,28 @@ def parse_block_expand(block_expand, input_layer_name, block_expand_conf): block_expand.img_size_y, block_expand.block_y, block_expand.padding_y, block_expand.stride_y, False) + def parse_maxout(maxout, input_layer_name, maxout_conf): maxout_conf.channels = maxout.channels maxout_conf.groups = maxout.groups maxout_conf.img_size_x = maxout.img_size_x maxout_conf.img_size_y = maxout.img_size_y + # Define an evaluator @config_func def Evaluator( name, type, inputs, - chunk_scheme = None, - num_chunk_types = None, - classification_threshold = None, - positive_label = None, - dict_file = None, - result_file = None, - num_results = None, - delimited = None, - ): + chunk_scheme=None, + num_chunk_types=None, + classification_threshold=None, + positive_label=None, + dict_file=None, + result_file=None, + num_results=None, + delimited=None, ): evaluator = g_config.model_config.evaluators.add() evaluator.type = type evaluator.name = MakeLayerNameInSubmodel(name) @@ -1256,19 +1279,20 @@ def Evaluator( if delimited is not None: evaluator.delimited = delimited + class LayerBase(object): def __init__( self, name, type, - size, # size can be 0. In this case, subclass should set it. + size, # size can be 0. In this case, subclass should set it. 
inputs, device=None, active_type="", drop_rate=0., coeff=None): config_assert('@' not in name, - "layer name: %s contain special character @" % name) + "layer name: %s contain special character @" % name) global g_current_submodel name = MakeLayerNameInSubmodel(name) @@ -1307,8 +1331,8 @@ def __init__( if type_of(input) == str: input_layer_name = input input_config = Input( - input_layer_name = input, - parameter_name = gen_parameter_name(name, input_index)) + input_layer_name=input, + parameter_name=gen_parameter_name(name, input_index)) input_layer_name = input_config.input_layer_name elif isinstance(input, Input): input_layer_name = input.input_layer_name @@ -1317,16 +1341,15 @@ def __init__( input_config.parameter_name = \ gen_parameter_name(name, input_index) elif isinstance(input, Operator): - self.operators.append(input); + self.operators.append(input) input.operator_conf.input_indices.append(input_index) input_config = Input(input.input_layer_names[0]) input_layer_name = input_config.input_layer_name else: - raise ValueError( - 'Wrong type for inputs: %s' % type_of(input)) + raise ValueError('Wrong type for inputs: %s' % type_of(input)) config_assert(input_layer_name in g_layer_map, - "Unknown input layer '%s' for layer %s" - % (input_layer_name, name)) + "Unknown input layer '%s' for layer %s" % + (input_layer_name, name)) self.inputs[input_index] = input_config layer_input = self.config.inputs.add() layer_input.input_layer_name = input_config.input_layer_name @@ -1338,26 +1361,26 @@ def __init__( g_current_submodel.layer_names.append(self.config.name) - def get_input_layer(self, input_index): return g_layer_map[self.config.inputs[input_index].input_layer_name] # will return the bias created if not *for_self* def create_bias_parameter( self, - bias, # True/False or BiasCfg + bias, # True/False or BiasCfg size, - dims = None, - for_self = True, # whether create bias for layer self - ): + dims=None, + for_self=True, # whether create bias for layer self + ): if size == 0: return if dims is None: dims = [1, size] - config_assert(type_of(bias) == bool or type_of(bias) == Bias, - 'Incorrect type for bias: %s' % type_of(bias)) + config_assert( + type_of(bias) == bool or type_of(bias) == Bias, + 'Incorrect type for bias: %s' % type_of(bias)) if type_of(bias) == bool: if bias: @@ -1372,7 +1395,8 @@ def create_bias_parameter( Parameter( bias.parameter_name, size, - self.config.device if self.config.HasField('device') else None, + self.config.device + if self.config.HasField('device') else None, dims, bias.learning_rate, bias.momentum, @@ -1384,22 +1408,21 @@ def create_bias_parameter( initial_smart=bias.initial_smart, num_batches_regularization=bias.num_batches_regularization, sparse_remote_update=bias.sparse_remote_update, - gradient_clipping_threshold=bias.gradient_clipping_threshold, + gradient_clipping_threshold=bias. + gradient_clipping_threshold, is_static=bias.is_static, - is_shared=bias.is_shared, - ) + is_shared=bias.is_shared, ) if for_self: self.config.bias_parameter_name = bias.parameter_name else: return bias.parameter_name - def create_input_parameter( - self, - input_index, - size, - dims=None, - sparse = None, - format = None): + def create_input_parameter(self, + input_index, + size, + dims=None, + sparse=None, + format=None): if dims is None: # TODO(yuyang18): print warning and callstack here! 
dims = list() @@ -1414,12 +1437,12 @@ def create_input_parameter( if input_config.parameter_name in g_parameter_map: para = g_parameter_map[input_config.parameter_name] - config_assert(size == para.size, ('Shared parameter "%s" does not ' - + 'have same size: %s vs. %s') + config_assert(size == para.size, ( + 'Shared parameter "%s" does not ' + 'have same size: %s vs. %s') % (input_config.parameter_name, para.size, size)) - config_assert(dims == para.dims, ('Shared parameter "%s" does not ' - + 'have same dims: %s vs. %s') + config_assert(dims == para.dims, ( + 'Shared parameter "%s" does not ' + 'have same dims: %s vs. %s') % (input_config.parameter_name, para.dims, dims)) return @@ -1439,13 +1462,13 @@ def create_input_parameter( num_batches_regularization=input_config.num_batches_regularization, sparse_remote_update=input_config.sparse_remote_update, sparse_update=input_config.sparse_update, - gradient_clipping_threshold=input_config.gradient_clipping_threshold, + gradient_clipping_threshold=input_config. + gradient_clipping_threshold, sparse=sparse, format=format, is_static=input_config.is_static, is_shared=input_config.is_shared, - update_hooks=input_config.update_hooks - ) + update_hooks=input_config.update_hooks) def set_layer_size(self, size): if self.config.size == 0: @@ -1455,27 +1478,18 @@ def set_layer_size(self, size): 'Different inputs result in' + 'different layer size at layer %s' % self.config.name) + @config_layer('multi_class_cross_entropy_with_selfnorm') class MultiClassCrossEntropySelfNormCostLayer(LayerBase): - def __init__( - self, - name, - inputs, - softmax_selfnorm_alpha=0.1, - **xargs): - super(MultiClassCrossEntropySelfNormCostLayer, self).__init__(name, - 'multi_class_cross_entropy_with_selfnorm', 0, inputs, **xargs) + def __init__(self, name, inputs, softmax_selfnorm_alpha=0.1, **xargs): + super(MultiClassCrossEntropySelfNormCostLayer, self).__init__( + name, 'multi_class_cross_entropy_with_selfnorm', 0, inputs, **xargs) self.config.softmax_selfnorm_alpha = softmax_selfnorm_alpha + @config_layer('fc') class FCLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - bias=True, - **xargs): + def __init__(self, name, size, inputs, bias=True, **xargs): super(FCLayer, self).__init__(name, 'fc', size, inputs=inputs, **xargs) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) @@ -1489,22 +1503,23 @@ def __init__( else: sparse = None - self.create_input_parameter(input_index, psize, dims, sparse, format) + self.create_input_parameter(input_index, psize, dims, sparse, + format) self.create_bias_parameter(bias, self.config.size) + @config_layer('selective_fc') class SelectiveFCLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - bias=True, - selective_fc_pass_generation=False, - has_selected_colums=True, - selective_fc_full_mul_ratio=0.02, - selective_fc_parallel_plain_mul_thread_num=None, - **xargs): + def __init__(self, + name, + size, + inputs, + bias=True, + selective_fc_pass_generation=False, + has_selected_colums=True, + selective_fc_full_mul_ratio=0.02, + selective_fc_parallel_plain_mul_thread_num=None, + **xargs): super(SelectiveFCLayer, self).__init__( name, 'selective_fc', size, inputs=inputs, **xargs) # user MUST know if selctive fc is used in training, @@ -1525,8 +1540,8 @@ def __init__( input_num = len(self.inputs) if has_selected_colums: config_assert(input_num >= 2, - ("if indices of selected columns are not specified, " - "selective_fc Layer has at least two inputs")) + ("if indices 
of selected columns are not specified, " + "selective_fc Layer has at least two inputs")) input_num -= 1 for input_index in xrange(input_num): @@ -1539,26 +1554,23 @@ def __init__( if sparse: psize = self.inputs[input_index].nnz - self.create_input_parameter( - input_index, psize, dims, sparse, format) + self.create_input_parameter(input_index, psize, dims, sparse, + format) self.create_bias_parameter(bias, self.config.size) + @config_layer('print') class PrintLayer(LayerBase): - def __init__( - self, - name, - inputs): + def __init__(self, name, inputs): super(PrintLayer, self).__init__(name, 'print', 0, inputs) + @config_layer('data') class DataLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): - super(DataLayer, self).__init__(name, 'data' , size, inputs=[], device=device) + def __init__(self, name, size, device=None): + super(DataLayer, self).__init__( + name, 'data', size, inputs=[], device=device) + ''' DataNormLayer: A layer for data normalization @@ -1586,14 +1598,11 @@ def __init__( min-max: y = (x-min)/(max-min) decimal-scaling: y = x/10^j, where j is the smallest integer such that max(|y|)<1 ''' + + @config_layer('data_norm') class DataNormLayer(LayerBase): - def __init__( - self, - name, - inputs, - data_norm_strategy="z-score", - device=None): + def __init__(self, name, inputs, data_norm_strategy="z-score", device=None): super(DataNormLayer, self).__init__( name, 'data_norm', 0, inputs=inputs, device=device) self.config.data_norm_strategy = data_norm_strategy @@ -1605,15 +1614,12 @@ def __init__( self.inputs[0].is_static = True self.create_input_parameter(0, para_size, para_dims) + @config_layer('prelu') class ParameterReluLayer(LayerBase): layer_type = 'prelu' - def __init__( - self, - name, - inputs, - partial_sum = 1, - **args): + + def __init__(self, name, inputs, partial_sum=1, **args): super(ParameterReluLayer, self).__init__( name, self.layer_type, 0, inputs=inputs, **args) config_assert(len(self.inputs) == 1) @@ -1622,17 +1628,18 @@ def __init__( self.set_layer_size(input_layer.size) self.create_input_parameter(0, input_layer.size / partial_sum) + @config_layer('conv') class ConvLayerBase(LayerBase): layer_type = 'conv' - def __init__( - self, - name, - inputs=[], - bias=True, - num_filters=None, - shared_biases=False, - **xargs): + + def __init__(self, + name, + inputs=[], + bias=True, + num_filters=None, + shared_biases=False, + **xargs): super(ConvLayerBase, self).__init__( name, self.layer_type, 0, inputs=inputs, **xargs) @@ -1649,7 +1656,7 @@ def __init__( config_assert(use_gpu, "cudnn_conv only support GPU") if (use_gpu == 1 and self.layer_type != "exconv" and - (parallel_nn == 0 or self.config.device > -1)): + (parallel_nn == 0 or self.config.device > -1)): self.layer_type = "cudnn_conv" else: self.layer_type = "exconv" @@ -1661,17 +1668,14 @@ def __init__( for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_conv( - self.inputs[input_index].conv, - input_layer.name, - self.config.inputs[input_index].conv_conf, - num_filters) + parse_conv(self.inputs[input_index].conv, input_layer.name, + self.config.inputs[input_index].conv_conf, num_filters) conv_conf = self.config.inputs[input_index].conv_conf psize = self.calc_parameter_size(conv_conf) print("output size for %s is %d " % (name, conv_conf.output_x)) self.create_input_parameter(input_index, psize) self.set_layer_size( - (conv_conf.output_x ** 2) * self.config.num_filters) + (conv_conf.output_x**2) * self.config.num_filters) psize = 
self.config.size if shared_biases: @@ -1682,10 +1686,12 @@ def calc_parameter_size(self, conv_conf): return self.config.num_filters * conv_conf.filter_channels \ * (conv_conf.filter_size * conv_conf.filter_size_y) + @config_layer('exconv') class ConvLayer(ConvLayerBase): layer_type = 'exconv' + @config_layer('cudnn_conv') class ConvLayer(ConvLayerBase): layer_type = 'cudnn_conv' @@ -1694,14 +1700,14 @@ class ConvLayer(ConvLayerBase): @config_layer('convt') class ConvTransLayerBase(LayerBase): layer_type = 'convt' - def __init__( - self, - name, - inputs=[], - bias=True, - num_filters=None, - shared_biases=False, - **xargs): + + def __init__(self, + name, + inputs=[], + bias=True, + num_filters=None, + shared_biases=False, + **xargs): super(ConvTransLayerBase, self).__init__( name, self.layer_type, 0, inputs=inputs, **xargs) @@ -1732,7 +1738,7 @@ def __init__( print("output size for %s is %d " % (name, conv_conf.output_x)) self.create_input_parameter(input_index, psize) self.set_layer_size( - (conv_conf.img_size ** 2) * self.config.num_filters) + (conv_conf.img_size**2) * self.config.num_filters) psize = self.config.size if shared_biases: @@ -1743,85 +1749,76 @@ def calc_parameter_size(self, conv_conf): return conv_conf.channels * conv_conf.filter_channels \ * (conv_conf.filter_size * conv_conf.filter_size_y) + @config_layer('exconvt') class ConvTransLayer(ConvTransLayerBase): layer_type = 'exconvt' + @config_layer('norm') class NormLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(NormLayer, self).__init__(name, 'norm', 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(NormLayer, self).__init__( + name, 'norm', 0, inputs=inputs, device=device) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_norm( - self.inputs[input_index].norm, - input_layer.name, - self.config.inputs[input_index].norm_conf) + parse_norm(self.inputs[input_index].norm, input_layer.name, + self.config.inputs[input_index].norm_conf) norm_conf = self.config.inputs[input_index].norm_conf - self.set_layer_size((norm_conf.output_x ** 2) * norm_conf.channels) + self.set_layer_size((norm_conf.output_x**2) * norm_conf.channels) + @config_layer('pool') class PoolLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(PoolLayer, self).__init__(name, 'pool', 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(PoolLayer, self).__init__( + name, 'pool', 0, inputs=inputs, device=device) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_pool( - self.inputs[input_index].pool, - input_layer.name, - self.config.inputs[input_index].pool_conf) + parse_pool(self.inputs[input_index].pool, input_layer.name, + self.config.inputs[input_index].pool_conf) pool_conf = self.config.inputs[input_index].pool_conf - print("output size for %s is %d*%d " % ( - name, pool_conf.output_y, pool_conf.output_x)) - self.set_layer_size((pool_conf.output_x * pool_conf.output_y) * pool_conf.channels) + print("output size for %s is %d*%d " % (name, pool_conf.output_y, + pool_conf.output_x)) + self.set_layer_size( + (pool_conf.output_x * pool_conf.output_y) * pool_conf.channels) + @config_layer('spp') class SpatialPyramidPoolLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(SpatialPyramidPoolLayer, self).__init__(name, 'spp', 0, inputs=inputs, device=device) + def __init__(self, name, inputs, 
device=None): + super(SpatialPyramidPoolLayer, self).__init__( + name, 'spp', 0, inputs=inputs, device=device) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_spp( - self.inputs[input_index].spp, - input_layer.name, - self.config.inputs[input_index].spp_conf) + parse_spp(self.inputs[input_index].spp, input_layer.name, + self.config.inputs[input_index].spp_conf) spp_conf = self.config.inputs[input_index].spp_conf output_size = (pow(4, spp_conf.pyramid_height) - 1) / (4 - 1) print("output size for %s is %d " % (name, output_size)) self.set_layer_size(output_size * spp_conf.channels) + @config_layer('batch_norm') class BatchNormLayer(LayerBase): layer_type = 'batch_norm' - def __init__( - self, - name, - inputs, - active_type="linear", - bias=True, - device=None, - use_global_stats=True, - moving_average_fraction=0.9, - batch_norm_type=None, - **xargs): + + def __init__(self, + name, + inputs, + active_type="linear", + bias=True, + device=None, + use_global_stats=True, + moving_average_fraction=0.9, + batch_norm_type=None, + **xargs): if inputs is None: inputs = [] elif not isinstance(inputs, list): inputs = [inputs] - config_assert(len(inputs) == 1, - "BatchNormLayer must have one and only one input") + config_assert( + len(inputs) == 1, "BatchNormLayer must have one and only one input") # Create Input for moving mean and std, # in batch normalization layer. # These paras no need to update, so set is_static is true. @@ -1830,12 +1827,13 @@ def __init__( use_gpu = bool(int(g_command_config_args.get("use_gpu", 0))) is_shared = True if not use_gpu else False for i in xrange(2): - inputs.append(Input(inputs[0].input_layer_name, - initial_std=0.0, - initial_mean=0.0, - is_static=True, - is_shared=is_shared, - )) + inputs.append( + Input( + inputs[0].input_layer_name, + initial_std=0.0, + initial_mean=0.0, + is_static=True, + is_shared=is_shared, )) parallel_nn = bool(int(g_command_config_args.get("parallel_nn", 0))) cudnn_version = int(g_command_config_args.get("cudnn_version", 0)) @@ -1845,21 +1843,25 @@ def __init__( ((not parallel_nn) or self.config.device > -1) and \ cudnn_version >= 4007 self.layer_type = "cudnn_batch_norm" if use_cudnn else "batch_norm" - super(BatchNormLayer, self).__init__(name, self.layer_type, 0, - active_type=active_type, - inputs=inputs, device=device, **xargs) + super(BatchNormLayer, self).__init__( + name, + self.layer_type, + 0, + active_type=active_type, + inputs=inputs, + device=device, + **xargs) if use_global_stats is not None: self.config.use_global_stats = use_global_stats if moving_average_fraction is not None: self.config.moving_average_fraction = moving_average_fraction - input_layer= self.get_input_layer(0) - parse_image(self.inputs[0].image, - input_layer.name, + input_layer = self.get_input_layer(0) + parse_image(self.inputs[0].image, input_layer.name, self.config.inputs[0].image_conf) image_conf = self.config.inputs[0].image_conf - self.set_layer_size((image_conf.img_size ** 2) * image_conf.channels) + self.set_layer_size((image_conf.img_size**2) * image_conf.channels) psize = self.calc_parameter_size(image_conf) dims = [1, psize] @@ -1872,75 +1874,74 @@ def __init__( def calc_parameter_size(self, image_conf): return image_conf.channels + @config_layer('trans') class TransLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(TransLayer, self).__init__(name, 'trans', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 1, - 'TransLayer must have one and 
only one input') + def __init__(self, name, inputs, device=None): + super(TransLayer, self).__init__( + name, 'trans', 0, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 1, + 'TransLayer must have one and only one input') self.set_layer_size(self.get_input_layer(0).size) + @config_layer('resize') class ResizeLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - device=None): - super(ResizeLayer, self).__init__(name, 'resize', size=size, inputs=inputs, device=device) - config_assert(len(self.inputs) == 1, - 'ResizeLayer must have one and only one input') + def __init__(self, name, size, inputs, device=None): + super(ResizeLayer, self).__init__( + name, 'resize', size=size, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 1, + 'ResizeLayer must have one and only one input') + @config_layer('blockexpand') class BlockExpandLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(BlockExpandLayer, self).__init__(name, 'blockexpand', 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(BlockExpandLayer, self).__init__( + name, 'blockexpand', 0, inputs=inputs, device=device) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) - parse_block_expand(self.inputs[input_index].block_expand, - input_layer.name, + parse_block_expand( + self.inputs[input_index].block_expand, input_layer.name, self.config.inputs[input_index].block_expand_conf) - block_expand_conf = self.config.inputs[input_index].block_expand_conf - self.set_layer_size(block_expand_conf.block_x * block_expand_conf.block_y - * block_expand_conf.channels) + block_expand_conf = self.config.inputs[ + input_index].block_expand_conf + self.set_layer_size(block_expand_conf.block_x * + block_expand_conf.block_y * + block_expand_conf.channels) + @config_layer('maxout') class MaxOutLayer(LayerBase): - def __init__( - self, - name, - inputs, - **xargs): - super(MaxOutLayer, self).__init__(name, 'maxout', 0, inputs=inputs, **xargs) + def __init__(self, name, inputs, **xargs): + super(MaxOutLayer, self).__init__( + name, 'maxout', 0, inputs=inputs, **xargs) input_layer = self.get_input_layer(0) - parse_maxout(self.inputs[0].maxout, - input_layer.name, + parse_maxout(self.inputs[0].maxout, input_layer.name, self.config.inputs[0].maxout_conf) maxout_conf = self.config.inputs[0].maxout_conf - self.set_layer_size(g_layer_map[input_layer.name].size / maxout_conf.groups) + self.set_layer_size(g_layer_map[input_layer.name].size / + maxout_conf.groups) + # key: cost type # value: cost class g_cost_map = {} + # define a cost layer without any parameters def define_cost(class_name, cost_type): def init(cls, name, inputs, device=None, coeff=1.): - super(type(cls), cls).__init__(name, cost_type, 1, inputs, device=device, coeff=coeff) + super(type(cls), cls).__init__( + name, cost_type, 1, inputs, device=device, coeff=coeff) - cls = type(class_name, (LayerBase,), dict(__init__=init)) + cls = type(class_name, (LayerBase, ), dict(__init__=init)) global g_cost_map g_cost_map[cost_type] = cls + define_cost('MultiClassCrossEntropy', 'multi-class-cross-entropy') define_cost('RankingCost', 'rank-cost') define_cost('AucValidation', 'auc-validation') @@ -1951,19 +1952,15 @@ def init(cls, name, inputs, device=None, coeff=1.): define_cost('HuberTwoClass', 'huber') define_cost('SumCost', 'sum_cost') + @config_layer('hsigmoid') class HierarchicalSigmoidLayer(LayerBase): - def __init__( - self, - name, - num_classes, - 
inputs, - device=None, - bias=True): + def __init__(self, name, num_classes, inputs, device=None, bias=True): super(HierarchicalSigmoidLayer, self).__init__( name, 'hsigmoid', 1, inputs=inputs, device=device) - config_assert(len(self.inputs) >= 2, - 'HierarchicalSigmoidLayer must have at least 2 inputs') + config_assert( + len(self.inputs) >= 2, + 'HierarchicalSigmoidLayer must have at least 2 inputs') self.config.num_classes = num_classes for input_index in xrange(len(self.inputs) - 1): input_layer = self.get_input_layer(input_index) @@ -1972,6 +1969,7 @@ def __init__( self.create_input_parameter(input_index, psize, dims) self.create_bias_parameter(bias, num_classes - 1) + ''' lambdaCost for lambdaRank LTR approach @@ -1996,59 +1994,57 @@ def __init__( max_sort_size can be greater than the size of a list, in which case the algorithm will sort the entire list to get gradient. ''' + + @config_layer('lambda_cost') class LambdaCost(LayerBase): - def __init__( - self, - name, - inputs, - NDCG_num = 5, - max_sort_size = -1, - device=None): + def __init__(self, name, inputs, NDCG_num=5, max_sort_size=-1, device=None): super(LambdaCost, self).__init__( name, 'lambda_cost', 1, inputs=inputs, device=device) - config_assert(len(self.inputs) == 2, - 'lambdaCost must have 2 inputs') + config_assert(len(self.inputs) == 2, 'lambdaCost must have 2 inputs') self.config.NDCG_num = NDCG_num if max_sort_size != -1: - config_assert(NDCG_num <= max_sort_size, - 'NDCG_num must be less than or equal to max_sort_size') + config_assert( + NDCG_num <= max_sort_size, + 'NDCG_num must be less than or equal to max_sort_size') self.config.max_sort_size = max_sort_size + @config_layer('nce') class NCELayer(LayerBase): - def __init__( - self, - name, - num_classes, - inputs, - num_neg_samples=10, - neg_sampling_dist=None, - bias=True, - **xargs): + def __init__(self, + name, + num_classes, + inputs, + num_neg_samples=10, + neg_sampling_dist=None, + bias=True, + **xargs): super(NCELayer, self).__init__(name, 'nce', 1, inputs=inputs, **xargs) - config_assert(len(self.inputs) >= 2, - 'NCELayer must have at least 2 inputs') + config_assert( + len(self.inputs) >= 2, 'NCELayer must have at least 2 inputs') self.config.num_classes = num_classes if neg_sampling_dist is not None: - config_assert(len(neg_sampling_dist) == num_classes, - 'len(neg_sampling_dist)(%s) is not same as num_classes (%s)' - % (len(neg_sampling_dist), num_classes)) + config_assert( + len(neg_sampling_dist) == num_classes, + 'len(neg_sampling_dist)(%s) is not same as num_classes (%s)' % + (len(neg_sampling_dist), num_classes)) s = sum(neg_sampling_dist) - config_assert(abs(s - 1) < 1e-5, - 'The sum of neg_sampling_dist (%s) is not 1' % s) + config_assert( + abs(s - 1) < 1e-5, + 'The sum of neg_sampling_dist (%s) is not 1' % s) self.config.neg_sampling_dist.extend(neg_sampling_dist) self.config.num_neg_samples = num_neg_samples num_real_inputs = len(self.inputs) - 1 - input_layer = self.get_input_layer(num_real_inputs) + input_layer = self.get_input_layer(num_real_inputs) config_assert(input_layer.type == 'data', 'Expecting the last input layer of an nce layer to be ' 'a data layer') - if (num_real_inputs > 1 and input_layer.size == 1 - and self.get_input_layer(num_real_inputs - 1).type == 'data'): + if (num_real_inputs > 1 and input_layer.size == 1 and + self.get_input_layer(num_real_inputs - 1).type == 'data'): # This input layer is assumed to be a sample weight layer num_real_inputs -= 1 @@ -2062,105 +2058,82 @@ def __init__( @config_layer('addto') class 
AddToLayer(LayerBase): - def __init__( - self, - name, - inputs, - bias=True, - **xargs): + def __init__(self, name, inputs, bias=True, **xargs): super(AddToLayer, self).__init__( name, 'addto', 0, inputs=inputs, **xargs) - config_assert(len(inputs) > 0, - 'inputs cannot be empty for AddToLayer') + config_assert(len(inputs) > 0, 'inputs cannot be empty for AddToLayer') for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) self.create_bias_parameter(bias, self.config.size) + @config_layer('agent') class AgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): - super(AgentLayer, self).__init__(name, 'agent', size, inputs=[], device=device) + def __init__(self, name, size, device=None): + super(AgentLayer, self).__init__( + name, 'agent', size, inputs=[], device=device) + @config_layer('sequence_agent') class SequenceAgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): + def __init__(self, name, size, device=None): super(SequenceAgentLayer, self).__init__( name, 'sequence_agent', size, inputs=[], device=device) + @config_layer('gather_agent') class GatherAgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): + def __init__(self, name, size, device=None): super(GatherAgentLayer, self).__init__( name, 'gather_agent', size, inputs=[], device=device) + @config_layer('scatter_agent') class ScatterAgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): + def __init__(self, name, size, device=None): super(ScatterAgentLayer, self).__init__( name, 'scatter_agent', size, inputs=[], device=device) + @config_layer('sequence_gather_agent') class SequenceGatherAgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): + def __init__(self, name, size, device=None): super(SequenceGatherAgentLayer, self).__init__( - name, 'sequence_gather_agent', size, inputs=[], device=device) + name, 'sequence_gather_agent', size, inputs=[], device=device) + @config_layer('sequence_scatter_agent') class SequenceScatterAgentLayer(LayerBase): - def __init__( - self, - name, - size, - device=None): + def __init__(self, name, size, device=None): super(SequenceScatterAgentLayer, self).__init__( - name, 'sequence_scatter_agent', size, inputs=[], device=device) + name, 'sequence_scatter_agent', size, inputs=[], device=device) + @config_layer('multiplex') class MultiplexLayer(LayerBase): - def __init__( - self, - name, - inputs, - size, - device=None): - super(MultiplexLayer, self).__init__(name, 'multiplex', size, inputs=inputs, device=device) - config_assert(len(inputs) > 2, - 'MultiplexLayer should have more than 2 inputs.') + def __init__(self, name, inputs, size, device=None): + super(MultiplexLayer, self).__init__( + name, 'multiplex', size, inputs=inputs, device=device) + config_assert( + len(inputs) > 2, 'MultiplexLayer should have more than 2 inputs.') for i in range(1, len(inputs)): - config_assert(self.get_input_layer(i).size == size, - "All the input layers except the first one should" - "have the same size as the MultiplexLayer.") + config_assert( + self.get_input_layer(i).size == size, + "All the input layers except the first one should" + "have the same size as the MultiplexLayer.") + @config_func -def Link(name, - has_subseq=False, - ): +def Link( + name, + has_subseq=False, ): link_config = LinkConfig() link_config.link_name = name link_config.has_subseq = has_subseq return link_config + # memory for recurrent layer group. 
# *name* and *size* are actual layer's name and size. # will return name of the memory, @@ -2175,43 +2148,46 @@ def Link(name, # can only be initailized by a *boot_layer* which is a sequence. # @config_func -def Memory(name, - size, - is_sequence=False, - boot_layer=None, - boot_bias=False, - boot_bias_active_type="", - boot_with_const_id=None, - ): +def Memory( + name, + size, + is_sequence=False, + boot_layer=None, + boot_bias=False, + boot_bias_active_type="", + boot_with_const_id=None, ): agent_name = name + "+delay1" if is_sequence: agent_layer = SequenceAgentLayer(agent_name, size) else: agent_layer = AgentLayer(agent_name, size) config_assert(g_current_submodel.is_recurrent_layer_group, - 'Memory should be used in recurrent layer group only') + 'Memory should be used in recurrent layer group only') memory = g_current_submodel.memories.add() memory.layer_name = MakeLayerNameInSubmodel(name) memory.link_name = MakeLayerNameInSubmodel(agent_name) memory.is_sequence = is_sequence - options = sum((boot_layer is not None, - bool(boot_bias), + options = sum((boot_layer is not None, bool(boot_bias), boot_with_const_id is not None)) - config_assert(options <= 1, - 'take one option at most from boot_layer, boot_bias, or boot_with_const_id') + config_assert( + options <= 1, + 'take one option at most from boot_layer, boot_bias, or boot_with_const_id' + ) if boot_layer is not None: boot_layer = MakeLayerNameInParentSubmodel(boot_layer) config_assert(boot_layer in g_layer_map, - 'boot_layer "%s" does not correspond to a layer name' % boot_layer) + 'boot_layer "%s" does not correspond to a layer name' % + boot_layer) memory.boot_layer_name = boot_layer elif boot_bias: memory.boot_bias_parameter_name = agent_layer.create_bias_parameter( - boot_bias, size, for_self = False) + boot_bias, size, for_self=False) memory.boot_bias_active_type = boot_bias_active_type elif boot_with_const_id is not None: memory.boot_with_const_id = boot_with_const_id return agent_name + # Generator for recurrent layer group, to use it: # 1. define a id layer as output of layer group # 2. 
define a memory of this id layer, and assign a boot id(begin of sequence) @@ -2223,11 +2199,10 @@ def Memory(name, @config_func def Generator( max_num_frames, - eos_layer_name = "eos_check", - num_results_per_sample = 1, - beam_size = 1, - log_prob = None, - ): + eos_layer_name="eos_check", + num_results_per_sample=1, + beam_size=1, + log_prob=None, ): generator_config = GeneratorConfig() generator_config.max_num_frames = max_num_frames generator_config.eos_layer_name = eos_layer_name @@ -2237,60 +2212,55 @@ def Generator( generator_config.log_prob = log_prob return generator_config + @config_layer('expand') class ExpandLayer(LayerBase): - def __init__( - self, - name, - inputs, - trans_type='non-seq', - device=None, - bias=False): - super(ExpandLayer, self).__init__( - name, 'expand', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 2, - 'ExpandLayer takes 2 and only 2 inputs') - self.config.trans_type = trans_type - for input_index in xrange(len(self.inputs)): - input_layer = self.get_input_layer(input_index) - self.set_layer_size(self.get_input_layer(0).size) - self.create_bias_parameter(bias, self.config.size) + def __init__(self, + name, + inputs, + trans_type='non-seq', + device=None, + bias=False): + super(ExpandLayer, self).__init__( + name, 'expand', 0, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 2, 'ExpandLayer takes 2 and only 2 inputs') + self.config.trans_type = trans_type + for input_index in xrange(len(self.inputs)): + input_layer = self.get_input_layer(input_index) + self.set_layer_size(self.get_input_layer(0).size) + self.create_bias_parameter(bias, self.config.size) + @config_layer('featmap_expand') class FeatMapExpandLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None, - num_filters=None, - bias=False): - super(FeatMapExpandLayer, self).__init__( - name, 'featmap_expand', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 1, - 'ExpandLayer takes 1 and only 1 inputs') - if num_filters is not None: + def __init__(self, name, inputs, device=None, num_filters=None, bias=False): + super(FeatMapExpandLayer, self).__init__( + name, 'featmap_expand', 0, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 1, 'ExpandLayer takes 1 and only 1 inputs') + if num_filters is not None: self.config.num_filters = num_filters - else: + else: logger.fatal("FeatMapExpandLayer must specify num_filters.") - self.set_layer_size(self.get_input_layer(0).size * num_filters) + self.set_layer_size(self.get_input_layer(0).size * num_filters) @config_layer('max') class MaxLayer(LayerBase): - def __init__( - self, - name, - inputs, - trans_type='non-seq', - active_type='linear', - device=None, - bias=False, - output_max_index=None): - super(MaxLayer, self).__init__(name, 'max', 0, inputs=inputs, device=device) + def __init__(self, + name, + inputs, + trans_type='non-seq', + active_type='linear', + device=None, + bias=False, + output_max_index=None): + super(MaxLayer, self).__init__( + name, 'max', 0, inputs=inputs, device=device) config_assert(len(self.inputs) == 1, 'MaxLayer must have 1 input') - self.config.trans_type = trans_type - self.config.active_type = active_type + self.config.trans_type = trans_type + self.config.active_type = active_type for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) @@ -2301,12 +2271,7 @@ def __init__( @config_layer('maxid') class MaxIdLayer(LayerBase): - def __init__( - self, - name, - 
inputs, - beam_size=None, - device=None): + def __init__(self, name, inputs, beam_size=None, device=None): super(MaxIdLayer, self).__init__( name, 'maxid', 0, inputs=inputs, device=device) config_assert(len(self.inputs) == 1, 'MaxIdLayer must have 1 input') @@ -2324,37 +2289,39 @@ def __init__( @config_layer('eos_id') class EosIdLayer(LayerBase): - def __init__( - self, - name, - inputs, - eos_id, - device=None): + def __init__(self, name, inputs, eos_id, device=None): super(EosIdLayer, self).__init__( name, 'eos_id', 0, inputs=inputs, device=device) config_assert(len(self.inputs) == 1, 'EosIdLayer must have 1 input') - self.set_layer_size(2) # boolean output + self.set_layer_size(2) # boolean output self.config.eos_id = eos_id + @config_layer('seqlastins') class SequenceLastInstanceLayer(LayerBase): - def __init__( - self, - name, - inputs, - active_type='linear', - trans_type='non-seq', - device=None, - bias=False): - super(SequenceLastInstanceLayer, self).__init__(name, 'seqlastins', - 0, inputs=inputs, device=device, active_type=active_type) - config_assert(len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input') - self.config.trans_type = trans_type + def __init__(self, + name, + inputs, + active_type='linear', + trans_type='non-seq', + device=None, + bias=False): + super(SequenceLastInstanceLayer, self).__init__( + name, + 'seqlastins', + 0, + inputs=inputs, + device=device, + active_type=active_type) + config_assert( + len(inputs) == 1, 'SequenceLastInstanceLayer must have 1 input') + self.config.trans_type = trans_type for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) self.create_bias_parameter(bias, self.config.size) + @config_layer('seqfirstins') class SequenceFirstInstanceLayer(SequenceLastInstanceLayer): def __init__( @@ -2364,167 +2331,163 @@ def __init__( active_type='linear', trans_type='non-seq', device=None, - bias=False, - ): - super(SequenceFirstInstanceLayer, self).__init__(name, - inputs=inputs, active_type=active_type, device=device, bias=bias) - self.config.trans_type = trans_type + bias=False, ): + super(SequenceFirstInstanceLayer, self).__init__( + name, + inputs=inputs, + active_type=active_type, + device=device, + bias=bias) + self.config.trans_type = trans_type self.config.select_first = True + @config_layer('seqconcat') class SequenceConcatLayer(LayerBase): - def __init__( - self, - name, - inputs, - active_type='linear', - device=None, - bias=False): - super(SequenceConcatLayer, self).__init__(name, 'seqconcat', - 0, inputs=inputs, device=device, active_type=active_type) - config_assert(len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs') + def __init__(self, + name, + inputs, + active_type='linear', + device=None, + bias=False): + super(SequenceConcatLayer, self).__init__( + name, + 'seqconcat', + 0, + inputs=inputs, + device=device, + active_type=active_type) + config_assert( + len(inputs) == 2, 'SequenceConcatLayer must have 2 inputs') for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) self.create_bias_parameter(bias, self.config.size) + @config_layer('seqreshape') class SequenceReshapeLayer(LayerBase): - def __init__( - self, - name, + def __init__(self, + name, + size, + inputs, + active_type='linear', + device=None, + bias=False): + super(SequenceReshapeLayer, self).__init__( + name, + 'seqreshape', size, - inputs, - active_type='linear', - device=None, - bias=False): - 
super(SequenceReshapeLayer, self).__init__(name, 'seqreshape', - size, inputs=inputs, device=device, active_type=active_type) - config_assert(len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs') + inputs=inputs, + device=device, + active_type=active_type) + config_assert( + len(inputs) == 1, 'SequenceReshapeLayer must have 1 inputs') self.set_layer_size(size) self.create_bias_parameter(bias, size) + @config_layer('subseq') class SubSequenceLayer(LayerBase): - def __init__( - self, - name, - inputs, - active_type='linear', - device=None, - bias=False): - super(SubSequenceLayer, self).__init__(name, 'subseq', - 0, inputs=inputs, device=device, active_type=active_type) + def __init__(self, + name, + inputs, + active_type='linear', + device=None, + bias=False): + super(SubSequenceLayer, self).__init__( + name, + 'subseq', + 0, + inputs=inputs, + device=device, + active_type=active_type) config_assert(len(inputs) == 3, 'SubSequenceLayer must have 3 inputs') input_layer0 = self.get_input_layer(0) size = input_layer0.size self.set_layer_size(size) self.create_bias_parameter(bias, size) + @config_layer('out_prod') class OuterProdLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(OuterProdLayer, self).__init__(name, 'out_prod', - 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(OuterProdLayer, self).__init__( + name, 'out_prod', 0, inputs=inputs, device=device) config_assert(len(inputs) == 2, 'OuterProdLayer must have 2 inputs') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) self.set_layer_size(input_layer0.size * input_layer1.size) + @config_layer('power') class PowerLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(PowerLayer, self).__init__(name, 'power', - 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(PowerLayer, self).__init__( + name, 'power', 0, inputs=inputs, device=device) config_assert(len(inputs) == 2, 'PowerLayer must have 2 inputs') input_layer1 = self.get_input_layer(1) self.set_layer_size(input_layer1.size) input_layer0 = self.get_input_layer(0) - config_assert(1==input_layer0.size, - 'The left input is the exponent and should be of size 1') + config_assert(1 == input_layer0.size, + 'The left input is the exponent and should be of size 1') + @config_layer('slope_intercept') class SlopeInterceptLayer(LayerBase): - def __init__( - self, - name, - inputs, - slope=1.0, - intercept=0.0, - device=None): - super(SlopeInterceptLayer, self).__init__(name, 'slope_intercept', - 0, inputs=inputs, device=device) + def __init__(self, name, inputs, slope=1.0, intercept=0.0, device=None): + super(SlopeInterceptLayer, self).__init__( + name, 'slope_intercept', 0, inputs=inputs, device=device) self.config.slope = slope self.config.intercept = intercept config_assert(len(inputs) == 1, 'SlopeInterceptLayer must have 1 input') input_layer0 = self.get_input_layer(0) self.set_layer_size(input_layer0.size) + @config_layer('scaling') class ScalingLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(ScalingLayer, self).__init__(name, 'scaling', - 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(ScalingLayer, self).__init__( + name, 'scaling', 0, inputs=inputs, device=device) config_assert(len(inputs) == 2, 'ScalingLayer must have 2 inputs') input_layer1 = self.get_input_layer(1) self.set_layer_size(input_layer1.size) input_layer0 = 
self.get_input_layer(0) - config_assert(1==input_layer0.size, - 'The left input should be of size 1') + config_assert(1 == input_layer0.size, + 'The left input should be of size 1') + @config_layer('conv_shift') class ConvShiftLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): - super(ConvShiftLayer, self).__init__(name, 'conv_shift', - 0, inputs=inputs, device=device) + def __init__(self, name, inputs, device=None): + super(ConvShiftLayer, self).__init__( + name, 'conv_shift', 0, inputs=inputs, device=device) config_assert(len(inputs) == 2, 'ConvShiftLayer must have 2 inputs') input_layer0 = self.get_input_layer(0) self.set_layer_size(input_layer0.size) + @config_layer('convex_comb') class ConvexCombinationLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - device=None): + def __init__(self, name, size, inputs, device=None): super(ConvexCombinationLayer, self).__init__( - name, 'convex_comb', size, inputs=inputs, device=device) - config_assert(len(self.inputs) == 2, - 'ConvexCombinationLayer must have 2 inputs') + name, 'convex_comb', size, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 2, 'ConvexCombinationLayer must have 2 inputs') config_assert( size * self.get_input_layer(0).size == self.get_input_layer(1).size, 'Wrong input size for ConvexCombinationLayer') self.set_layer_size(size) + @config_layer('interpolation') class InterpolationLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): + def __init__(self, name, inputs, device=None): super(InterpolationLayer, self).__init__( name, 'interpolation', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 3, - 'InterpolationLayer must have 3 inputs') + config_assert( + len(self.inputs) == 3, 'InterpolationLayer must have 3 inputs') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) input_layer2 = self.get_input_layer(2) @@ -2533,64 +2496,51 @@ def __init__( config_assert(input_layer1.size == input_layer2.size, 'the two vector inputs should be of the same size') + @config_layer('bilinear_interp') class BilinearInterpLayer(LayerBase): - def __init__( - self, - name, - inputs, - **xargs): + def __init__(self, name, inputs, **xargs): super(BilinearInterpLayer, self).__init__( name, 'bilinear_interp', 0, inputs=inputs, **xargs) input_layer = self.get_input_layer(0) - parse_bilinear(self.inputs[0].bilinear_interp, - input_layer.name, - self.config.inputs[0].bilinear_interp_conf); + parse_bilinear(self.inputs[0].bilinear_interp, input_layer.name, + self.config.inputs[0].bilinear_interp_conf) conf = self.inputs[0].bilinear_interp - self.set_layer_size(conf.out_size_x * conf.out_size_y * conf.num_channels) + self.set_layer_size(conf.out_size_x * conf.out_size_y * + conf.num_channels) + @config_layer('sum_to_one_norm') class SumToOneNormLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): + def __init__(self, name, inputs, device=None): super(SumToOneNormLayer, self).__init__( - name, 'sum_to_one_norm', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 1, - 'SumToOneNormLayer must have 1 input') + name, 'sum_to_one_norm', 0, inputs=inputs, device=device) + config_assert( + len(self.inputs) == 1, 'SumToOneNormLayer must have 1 input') input_layer0 = self.get_input_layer(0) self.set_layer_size(input_layer0.size) + @config_layer('cos_vm') class CosSimVecMatLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - cos_scale=1.0, - device=None): + def __init__(self, name, 
size, inputs, cos_scale=1.0, device=None): super(CosSimVecMatLayer, self).__init__( - name, 'cos_vm', size, inputs=inputs, device=device) + name, 'cos_vm', size, inputs=inputs, device=device) self.config.cos_scale = cos_scale - config_assert(len(self.inputs) == 2, - 'CosSimVecMatLayer must have 2 inputs') + config_assert( + len(self.inputs) == 2, 'CosSimVecMatLayer must have 2 inputs') config_assert( size * self.get_input_layer(0).size == self.get_input_layer(1).size, 'Wrong input size for CosSimVecMatLayer') + @config_layer('sampling_id') class SamplingIdLayer(LayerBase): - def __init__( - self, - name, - inputs, - device=None): + def __init__(self, name, inputs, device=None): super(SamplingIdLayer, self).__init__( name, 'sampling_id', 0, inputs=inputs, device=device) - config_assert(len(self.inputs) == 1, 'SamplingIdLayer must have 1 input') + config_assert( + len(self.inputs) == 1, 'SamplingIdLayer must have 1 input') for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) @@ -2603,33 +2553,33 @@ def __init__( # 'squarerootn': sum each sample, but divide by sqrt(sample_num). @config_layer('average') class AverageLayer(LayerBase): - def __init__( - self, - name, - inputs, - average_strategy='average', - trans_type='non-seq', - active_type='linear', - device=None, - bias=False): - super(AverageLayer, self).__init__(name, 'average', 0, inputs=inputs, - device=device, active_type=active_type) + def __init__(self, + name, + inputs, + average_strategy='average', + trans_type='non-seq', + active_type='linear', + device=None, + bias=False): + super(AverageLayer, self).__init__( + name, + 'average', + 0, + inputs=inputs, + device=device, + active_type=active_type) self.config.average_strategy = average_strategy - self.config.trans_type = trans_type + self.config.trans_type = trans_type config_assert(len(inputs) == 1, 'AverageLayer must have 1 input') for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) self.set_layer_size(input_layer.size) self.create_bias_parameter(bias, self.config.size) + @config_layer('cos') class CosSimLayer(LayerBase): - def __init__( - self, - name, - inputs, - cos_scale=5, - device=None): + def __init__(self, name, inputs, cos_scale=5, device=None): super(CosSimLayer, self).__init__( name, 'cos', 1, inputs=inputs, device=device) config_assert(len(self.inputs) == 2, 'CosSimLayer must have 2 inputs') @@ -2641,18 +2591,13 @@ def __init__( @config_layer('tensor') class TensorLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - device=None, - bias=True, - **xargs): - super(TensorLayer, self).__init__(name, 'tensor', size, inputs=inputs, device=device, **xargs) + def __init__(self, name, size, inputs, device=None, bias=True, **xargs): + super(TensorLayer, self).__init__( + name, 'tensor', size, inputs=inputs, device=device, **xargs) config_assert(len(self.inputs) == 2, 'TensorLayer must have 2 inputs') config_assert(size > 0, 'size must be positive') - config_assert(inputs[1].parameter_name == None, 'second parameter should be None.') + config_assert(inputs[1].parameter_name == None, + 'second parameter should be None.') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) psize = size * input_layer0.size * input_layer1.size @@ -2663,14 +2608,13 @@ def __init__( @config_layer('mixed') class MixedLayer(LayerBase): - def __init__( - self, - name, - inputs, - size=0, - bias=True, - error_clipping_threshold=None, - **xargs): + 
def __init__(self, + name, + inputs, + size=0, + bias=True, + error_clipping_threshold=None, + **xargs): config_assert(inputs, 'inputs cannot be empty') super(MixedLayer, self).__init__( name, 'mixed', size, inputs=inputs, **xargs) @@ -2695,24 +2639,28 @@ def __init__( else: sz = operator.calc_output_size(operator_conf.input_sizes) if sz != 0: - config_assert(sz == self.config.size, - "different inputs have different size: %s vs. %s" % - (sz, self.config.size)) + config_assert( + sz == self.config.size, + "different inputs have different size: %s vs. %s" % + (sz, self.config.size)) for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) input = self.inputs[input_index] if input_index not in operator_input_index: - config_assert(isinstance(input, Projection), "input should be projection or operation") + config_assert( + isinstance(input, Projection), + "input should be projection or operation") if self.config.size == 0 and isinstance(input, Projection): size = input.calc_output_size(input_layer) if size != 0: self.set_layer_size(size) elif isinstance(input, Projection): - sz = input.calc_output_size(input_layer) - if sz != 0: - config_assert(sz == self.config.size, - "different inputs have different size: %s vs. %s" % - (sz, self.config.size)) + sz = input.calc_output_size(input_layer) + if sz != 0: + config_assert( + sz == self.config.size, + "different inputs have different size: %s vs. %s" % + (sz, self.config.size)) config_assert(size != 0, "size is not set") for input_index in xrange(len(self.inputs)): @@ -2724,7 +2672,8 @@ def __init__( input_config = self.config.inputs[input_index] input_config.proj_conf.CopyFrom(input.proj_conf) - input_config.proj_conf.name = gen_parameter_name(name, input_index) + input_config.proj_conf.name = gen_parameter_name(name, + input_index) psize = input.calc_parameter_size(input_layer.size, size) dims = input.calc_parameter_dims(input_layer.size, size) self.create_input_parameter(input_index, psize, dims) @@ -2750,21 +2699,16 @@ def __init__( if error_clipping_threshold is not None: self.config.error_clipping_threshold = error_clipping_threshold + # like MixedLayer, but no bias parameter @config_func -def ExpressionLayer(name, - inputs, - **xargs): +def ExpressionLayer(name, inputs, **xargs): MixedLayer(name, inputs, bias=False, **xargs) + @config_layer('concat') class ConcatenateLayer(LayerBase): - def __init__( - self, - name, - inputs, - bias=False, - **xargs): + def __init__(self, name, inputs, bias=False, **xargs): config_assert(inputs, 'inputs cannot be empty') config_assert(not bias, 'ConcatenateLayer cannot support bias.') super(ConcatenateLayer, self).__init__( @@ -2773,30 +2717,27 @@ def __init__( for input_index in xrange(len(self.inputs)): input_layer = self.get_input_layer(input_index) input = self.inputs[input_index] - if self.config.size == 0: + if self.config.size == 0: size += input_layer.size self.set_layer_size(size) + # like concat layer, but each input layer was processed by a Projection. 
@config_layer('concat2') class ConcatenateLayer2(LayerBase): - def __init__( - self, - name, - inputs, - bias=False, - **xargs): + def __init__(self, name, inputs, bias=False, **xargs): config_assert(inputs, 'inputs cannot be empty') super(ConcatenateLayer2, self).__init__( name, 'concat2', 0, inputs=inputs, **xargs) if isinstance(self.inputs[0], ConvProjection): - for input_index in xrange(len(self.inputs) - 1): - input = self.inputs[input_index + 1] - config_assert(isinstance(input, ConvProjection), - "The first input of ConcatenateLayer2 is ConvProjection, " - "the other inputs should also be ConvProjection.") + for input_index in xrange(len(self.inputs) - 1): + input = self.inputs[input_index + 1] + config_assert( + isinstance(input, ConvProjection), + "The first input of ConcatenateLayer2 is ConvProjection, " + "the other inputs should also be ConvProjection.") size = 0 for input_index in xrange(len(self.inputs)): @@ -2818,9 +2759,9 @@ def __init__( input_config.proj_conf.CopyFrom(input.proj_conf) input_config.proj_conf.name = gen_parameter_name(name, input_index) psize = input.calc_parameter_size(input.proj_conf.input_size, - input.proj_conf.output_size) + input.proj_conf.output_size) dims = input.calc_parameter_dims(input.proj_conf.input_size, - input.proj_conf.output_size) + input.proj_conf.output_size) self.create_input_parameter(input_index, psize, dims) psize = self.config.size @@ -2834,16 +2775,12 @@ def __init__( self.config.bias_size = psize self.create_bias_parameter(bias, psize) + @config_layer('recurrent') class RecurrentLayer(LayerBase): - def __init__( - self, - name, - inputs, - reversed=False, - bias=True, - **xargs): - super(RecurrentLayer, self).__init__(name, 'recurrent', 0, inputs, **xargs) + def __init__(self, name, inputs, reversed=False, bias=True, **xargs): + super(RecurrentLayer, self).__init__(name, 'recurrent', 0, inputs, ** + xargs) config_assert(len(self.inputs) == 1, 'RecurrentLayer must have 1 input') input_layer = self.get_input_layer(0) size = input_layer.size @@ -2853,17 +2790,17 @@ def __init__( self.create_input_parameter(0, size * size, dims) self.create_bias_parameter(bias, self.config.size) + @config_layer('lstmemory') class LstmLayer(LayerBase): - def __init__( - self, - name, - inputs, - reversed=False, - active_gate_type="sigmoid", - active_state_type="sigmoid", - bias=True, - **xargs): + def __init__(self, + name, + inputs, + reversed=False, + active_gate_type="sigmoid", + active_state_type="sigmoid", + bias=True, + **xargs): super(LstmLayer, self).__init__(name, 'lstmemory', 0, inputs, **xargs) config_assert(len(self.inputs) == 1, 'LstmLayer must have 1 input') input_layer = self.get_input_layer(0) @@ -2872,117 +2809,126 @@ def __init__( size = input_layer.size / 4 self.set_layer_size(size) self.config.reversed = reversed - self.config.active_gate_type = active_gate_type + self.config.active_gate_type = active_gate_type self.config.active_state_type = active_state_type self.create_input_parameter(0, size * size * 4, [size, size, 4]) #bias includes 3 kinds of peephole, 4 + 3 = 7 self.create_bias_parameter(bias, size * 7) + @config_layer('lstm_step') class LstmStepLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - active_gate_type="sigmoid", - active_state_type="sigmoid", - bias=True, - **xargs): - super(LstmStepLayer, self).__init__(name, 'lstm_step', - size, inputs, **xargs) + def __init__(self, + name, + size, + inputs, + active_gate_type="sigmoid", + active_state_type="sigmoid", + bias=True, + **xargs): + 
super(LstmStepLayer, self).__init__(name, 'lstm_step', size, inputs, + **xargs) config_assert(len(inputs) == 2, 'LstmStepLayer must have 2 inputs') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) - config_assert(input_layer0.size == 4 * size, 'input_layer0.size != 4 * layer.size') - config_assert(input_layer1.size == size, 'input_layer1.size != layer.size') - self.config.active_gate_type = active_gate_type + config_assert(input_layer0.size == 4 * size, + 'input_layer0.size != 4 * layer.size') + config_assert(input_layer1.size == size, + 'input_layer1.size != layer.size') + self.config.active_gate_type = active_gate_type self.config.active_state_type = active_state_type self.create_bias_parameter(bias, size * 3) + # get the specific output from the input layer. @config_layer('get_output') class GetOutputLayer(LayerBase): - def __init__( - self, - name, - size, - inputs): - super(GetOutputLayer, self).__init__(name, 'get_output' , size, inputs) - config_assert(len(self.inputs) == 1, 'GetOutputLayer must have 1 inputs') + def __init__(self, name, size, inputs): + super(GetOutputLayer, self).__init__(name, 'get_output', size, inputs) + config_assert( + len(self.inputs) == 1, 'GetOutputLayer must have 1 inputs') inputs = self.inputs[0] config_assert(inputs.input_layer_argument, 'input_layer_argument cannot be empty') + @config_layer('mdlstmemory') class MDLstmLayer(LayerBase): - def __init__( - self, - name, - inputs, - directions=True, - active_gate_type="sigmoid", - active_state_type="sigmoid", - bias=True, - **xargs): - super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs, **xargs) + def __init__(self, + name, + inputs, + directions=True, + active_gate_type="sigmoid", + active_state_type="sigmoid", + bias=True, + **xargs): + super(MDLstmLayer, self).__init__(name, 'mdlstmemory', 0, inputs, ** + xargs) config_assert(len(self.inputs) == 1, 'MDLstmLayer must have 1 input') input_layer = self.get_input_layer(0) dim_num = len(directions) #check input_layer.size is divided by (3+dim_num) - config_assert(input_layer.size % (3+dim_num) == 0, "size % (dim_num) should be 0!") - size = input_layer.size / (3+dim_num) + config_assert(input_layer.size % + (3 + dim_num) == 0, "size % (dim_num) should be 0!") + size = input_layer.size / (3 + dim_num) self.set_layer_size(size) - self.config.active_gate_type = active_gate_type + self.config.active_gate_type = active_gate_type self.config.active_state_type = active_state_type for i in xrange(len(directions)): self.config.directions.append(int(directions[i])) - self.create_input_parameter(0, size * size * (3+dim_num), [size, size, 3+dim_num]) + self.create_input_parameter(0, size * size * + (3 + dim_num), [size, size, 3 + dim_num]) #bias includes 3 kinds of peephole, 3+dim_num+2+dim_num - self.create_bias_parameter(bias, size * (5+2*dim_num)) + self.create_bias_parameter(bias, size * (5 + 2 * dim_num)) + @config_layer('gated_recurrent') class GatedRecurrentLayer(LayerBase): - def __init__( - self, - name, - inputs, - reversed=False, - active_gate_type="sigmoid", - bias=True, - **xargs): - super(GatedRecurrentLayer, self).__init__(name, 'gated_recurrent', 0, inputs, **xargs) - config_assert(len(self.inputs) == 1, 'GatedRecurrentLayer must have 1 input') + def __init__(self, + name, + inputs, + reversed=False, + active_gate_type="sigmoid", + bias=True, + **xargs): + super(GatedRecurrentLayer, self).__init__(name, 'gated_recurrent', 0, + inputs, **xargs) + config_assert( + len(self.inputs) == 1, 'GatedRecurrentLayer must 
have 1 input') input_layer = self.get_input_layer(0) #check input_layer.size is divided by 3 config_assert(input_layer.size % 3 == 0, "size % 3 should be 0!") size = input_layer.size / 3 self.set_layer_size(size) self.config.reversed = reversed - self.config.active_gate_type = active_gate_type + self.config.active_gate_type = active_gate_type self.create_input_parameter(0, size * size * 3, [size, size * 3]) self.create_bias_parameter(bias, size * 3) + @config_layer('gru_step') class GruStepLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - active_gate_type="sigmoid", - bias=True, - **xargs): - super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs, **xargs) + def __init__(self, + name, + size, + inputs, + active_gate_type="sigmoid", + bias=True, + **xargs): + super(GruStepLayer, self).__init__(name, 'gru_step', size, inputs, ** + xargs) config_assert(len(self.inputs) == 2, 'GruStepLayer must have 2 input') input_layer0 = self.get_input_layer(0) input_layer1 = self.get_input_layer(1) - config_assert(input_layer0.size == 3 * size, 'input_layer0.size != 3 * layer.size') - config_assert(input_layer1.size == size, 'input_layer1.size != layer.size') - self.config.active_gate_type = active_gate_type + config_assert(input_layer0.size == 3 * size, + 'input_layer0.size != 3 * layer.size') + config_assert(input_layer1.size == size, + 'input_layer1.size != layer.size') + self.config.active_gate_type = active_gate_type self.create_input_parameter(0, size * size * 3, [size, size * 3]) self.create_bias_parameter(bias, size * 3) + ''' A layer for calculating the cost of sequential conditional random field model. Example: CRFLayer(name="crf_cost", size=label_num, @@ -2990,20 +2936,18 @@ def __init__( where "weight" is optional, one weight for each sequence @param coeff: weight of the layer ''' + + @config_layer('crf') class CRFLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - coeff=1.0, - device=None): + def __init__(self, name, size, inputs, coeff=1.0, device=None): super(CRFLayer, self).__init__(name, 'crf', size, inputs, device=device) - config_assert(2 <= len(self.inputs) <= 3, 'CRFLayer must have 2 or 3 inputs') + config_assert(2 <= len(self.inputs) <= 3, + 'CRFLayer must have 2 or 3 inputs') self.create_input_parameter(0, size * (size + 2), [size, size + 2]) self.config.coeff = coeff + ''' A layer for calculating the decoding sequence of sequential conditional random field model. 
@@ -3012,14 +2956,11 @@ def __init__( this layer will also calculate error, output_.value[i] is 1 for incorrect decoding or 0 for correct decoding ''' + + @config_layer('crf_decoding') class CRFDecodingLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - device=None): + def __init__(self, name, size, inputs, device=None): super(CRFDecodingLayer, self).__init__( name, 'crf_decoding', size, inputs, device=device) config_assert( @@ -3027,47 +2968,35 @@ def __init__( 'CRFDecodingLayer cannot have more than 2 inputs') self.create_input_parameter(0, size * (size + 2), [size, size + 2]) + @config_layer('ctc') class CTCLayer(LayerBase): - def __init__( - self, - name, - size, - inputs, - norm_by_times = False, - device=None): + def __init__(self, name, size, inputs, norm_by_times=False, device=None): super(CTCLayer, self).__init__(name, 'ctc', size, inputs, device=device) self.config.norm_by_times = norm_by_times config_assert(len(self.inputs) == 2, 'CTCLayer must have 2 inputs') + @config_layer('recurrent_layer_group') class RecurrentLayerGroup(LayerBase): - def __init__( - self, - name, - device=None): + def __init__(self, name, device=None): super(RecurrentLayerGroup, self).__init__( name, 'recurrent_layer_group', 0, inputs=[], device=device) # Deprecated, use a new layer specific class instead @config_func -def Layer( - name, - type, - **xargs): +def Layer(name, type, **xargs): layers = {} layers.update(g_cost_map) layers.update(g_layer_type_map) layer_func = layers.get(type) - config_assert(layer_func, - "layer type '%s' not supported." % type) + config_assert(layer_func, "layer type '%s' not supported." % type) return layer_func(name, **xargs) + @config_func -def ParameterHook( - type, - **kwargs): +def ParameterHook(type, **kwargs): if type == 'pruning': mask_filename = kwargs.get('mask_filename', None) assert mask_filename is not None @@ -3080,30 +3009,28 @@ def ParameterHook( @config_func -def Parameter( - name, - size, - device, - dims, - learning_rate=None, - momentum=None, - decay_rate=None, - decay_rate_l1=None, - initial_mean=None, - initial_std=None, - initial_strategy=None, - initial_smart=None, - num_batches_regularization=None, - sparse_remote_update=None, - sparse_update=None, - gradient_clipping_threshold=None, - sparse=None, - format=None, - need_compact=None, - is_static=None, - is_shared=None, - update_hooks=None - ): +def Parameter(name, + size, + device, + dims, + learning_rate=None, + momentum=None, + decay_rate=None, + decay_rate_l1=None, + initial_mean=None, + initial_std=None, + initial_strategy=None, + initial_smart=None, + num_batches_regularization=None, + sparse_remote_update=None, + sparse_update=None, + gradient_clipping_threshold=None, + sparse=None, + format=None, + need_compact=None, + is_static=None, + is_shared=None, + update_hooks=None): config_assert(name not in g_parameter_map, 'Duplicated parameter name: ' + name) @@ -3134,8 +3061,8 @@ def Parameter( para.initial_std = default(initial_std, g_default_initial_std) para.initial_mean = default(initial_mean, g_default_initial_mean) - num_batches_regularization = default( - num_batches_regularization, g_default_num_batches_regularization) + num_batches_regularization = default(num_batches_regularization, + g_default_num_batches_regularization) if num_batches_regularization is not None: para.num_batches_regularization = int(num_batches_regularization) @@ -3145,18 +3072,21 @@ def Parameter( g_config.opt_config.use_sparse_remote_updater = True if sparse_update is not None: para.sparse_update = 
sparse_update - gradient_clipping_threshold = default( - gradient_clipping_threshold, g_default_gradient_clipping_threshold) + gradient_clipping_threshold = default(gradient_clipping_threshold, + g_default_gradient_clipping_threshold) if gradient_clipping_threshold is not None: para.gradient_clipping_threshold = gradient_clipping_threshold - para.initial_strategy = default(initial_strategy, g_default_initial_strategy) + para.initial_strategy = default(initial_strategy, + g_default_initial_strategy) para.initial_smart = default(initial_smart, g_default_initial_smart) if para.initial_smart: para.initial_mean = 0. if len(para.dims) != 0: para.initial_std = 1. / math.sqrt(para.dims[0]) else: - print("Use initial_smart, but dims not set. Initial_smart may not be used in this layer") + print( + "Use initial_smart, but dims not set. Initial_smart may not be used in this layer" + ) traceback.print_exc() para.initial_std = 1. / math.sqrt(para.size) if g_default_compact_func is not None: @@ -3195,64 +3125,78 @@ def default_initial_std(val): global g_default_initial_std g_default_initial_std = val + @config_func def default_initial_mean(val): global g_default_initial_mean g_default_initial_mean = val + @config_func def default_initial_strategy(val): global g_default_initial_strategy g_default_initial_strategy = val + @config_func def default_initial_smart(val): global g_default_initial_smart g_default_initial_smart = val + @config_func def default_momentum(val): global g_default_momentum g_default_momentum = val + @config_func def default_decay_rate(val): global g_default_decay_rate g_default_decay_rate = val + @config_func def default_num_batches_regularization(val): global g_default_num_batches_regularization g_default_num_batches_regularization = val + @config_func def default_gradient_clipping_threshold(val): global g_default_gradient_clipping_threshold g_default_gradient_clipping_threshold = val + @config_func def default_device(val): global g_default_device g_default_device = val + @config_func def default_update_hooks(val): global g_default_update_hooks g_default_update_hooks = val + @config_func def default_compact_func(val): global g_default_compact_func g_default_compact_func = val + def make_importer(config_dir, config_args): def Import(config_file, local_args={}): if not config_file.startswith('/'): config_file = config_dir + '/' + config_file g_config.config_files.append(config_file) - execfile(config_file, make_config_environment(config_file, config_args), local_args) + execfile(config_file, + make_config_environment(config_file, config_args), local_args) + return Import + settings = dict( batch_size=None, mini_batch_size=None, @@ -3281,26 +3225,24 @@ def Import(config_file, local_args={}): ada_rou=0.95, delta_add_rate=1.0, shrink_parameter_value=0, - adam_beta1 = 0.9, - adam_beta2 = 0.999, - adam_epsilon = 1e-8, -) + adam_beta1=0.9, + adam_beta2=0.999, + adam_epsilon=1e-8, ) -settings_deprecated = dict( - usage_ratio=1., -) +settings_deprecated = dict(usage_ratio=1., ) trainer_settings = dict( save_dir="./output/model", init_model_path=None, - start_pass=0, -) + start_pass=0, ) + @config_func def Settings(**args): for k, v in args.iteritems(): if k == "usage_ratio": - logger.warning("Deprecated: define usage_ratio in DataConfig instead") + logger.warning( + "Deprecated: define usage_ratio in DataConfig instead") if g_config.HasField("data_config"): g_config.data_config.__setattr__(k, v) settings_deprecated[k] = v @@ -3312,10 +3254,12 @@ def Settings(**args): else: logger.fatal('Unkown 
setting: %s' % k) + @config_func def cluster_config(**args): pass + @config_func def EnableSubmodelSuffix(flag=True): """ @@ -3325,10 +3269,12 @@ def EnableSubmodelSuffix(flag=True): global g_add_submodel_suffix g_add_submodel_suffix = flag + def make_config_environment(config_file, config_args): def make_setter(k): def setter(v): logger.fatal("Obsolete: use Settings(%s=%s, ...) instead" % (k, v)) + return setter funcs = {} @@ -3344,13 +3290,13 @@ def setter(v): funcs.update( Import=make_importer(config_dir, config_args), - get_config_arg=make_get_config_arg(config_args), - ) + get_config_arg=make_get_config_arg(config_args), ) funcs.update(g_extended_config_funcs) return funcs + def make_get_config_arg(config_args): def get_config_arg(name, type, default=None): if type == bool: @@ -3367,6 +3313,7 @@ def get_config_arg(name, type, default=None): return get_config_arg + def importlib(name): __import__(name) return sys.modules[name] @@ -3379,10 +3326,12 @@ def find_caller(): return s[0], s[1], s[2] return "(unknown file)", 0, "(unknown function)" + def my_fatal(s): logger.critical(s) raise Exception() + def parse_config(config_file, config_arg_str): ''' @param config_arg_str: a string of the form var1=val1,var2=val2. It will be @@ -3420,7 +3369,7 @@ def parse_config(config_file, config_arg_str): for k, v in settings.iteritems(): if v is None: continue - g_config.opt_config.__setattr__(k, v); + g_config.opt_config.__setattr__(k, v) for k, v in trainer_settings.iteritems(): if v is None: @@ -3447,6 +3396,7 @@ def parse_config_and_serialize(config_file, config_arg_str): traceback.print_exc() raise + if __name__ == '__main__': try: config = parse_config(sys.argv[1], '') diff --git a/python/paddle/trainer/config_parser_extension.py b/python/paddle/trainer/config_parser_extension.py index 3445076274b0a..ba4c79efdc10e 100644 --- a/python/paddle/trainer/config_parser_extension.py +++ b/python/paddle/trainer/config_parser_extension.py @@ -17,11 +17,10 @@ g_config = None -def SimpleData( - files=None, - feat_dim=None, - context_len=None, - buffer_capacity=None): +def SimpleData(files=None, + feat_dim=None, + context_len=None, + buffer_capacity=None): data_config = DataConfig() data_config.type = 'simple' @@ -33,6 +32,7 @@ def SimpleData( data_config.buffer_capacity = buffer_capacity return data_config + def get_config_funcs(trainer_config): global g_config g_config = trainer_config diff --git a/python/paddle/trainer/recurrent_units.py b/python/paddle/trainer/recurrent_units.py index 7d51de78b0d79..a80ad13d1ed52 100644 --- a/python/paddle/trainer/recurrent_units.py +++ b/python/paddle/trainer/recurrent_units.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- + # recurrent_units.py # Version 2.0 # @@ -22,161 +22,175 @@ from paddle.trainer.config_parser import * + # long short term memory, can be used in recurrent machine # *inputs* must be a list of Projections, for example: # inputs = [FullMatrixProjection("input_layer_name")], # *para_prefix* defines parameter names, if the *para_prefix* of # two LstmRecurrentUnit is same, they share same parameters # *out_memory* can be defined outside if it's used outside -def LstmRecurrentUnit(name, size, - active_type, state_active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - out_memory = None): +def LstmRecurrentUnit(name, + size, + active_type, + state_active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + out_memory=None): - if para_prefix is None: + if para_prefix is None: para_prefix = name if out_memory is None: - out_memory = Memory(name = name, size = size) + out_memory = Memory(name=name, size=size) + + state_memory = Memory(name=name + "_" + "state", size=size) - state_memory = Memory(name = name + "_" + "state", size = size) - Layer( - name = name + "_" + "input_recurrent", - type = "mixed", - size = size * 4, #(input_s, input_gate, forget_gate, output_gate) - error_clipping_threshold = error_clipping_threshold, - bias = Bias(initial_std = 0, - parameter_name = para_prefix + "_input_recurrent.b"), - inputs = inputs + [ - FullMatrixProjection(out_memory, - parameter_name = para_prefix + "_input_recurrent.w"), - ], - ) + name=name + "_" + "input_recurrent", + type="mixed", + size=size * 4, #(input_s, input_gate, forget_gate, output_gate) + error_clipping_threshold=error_clipping_threshold, + bias=Bias( + initial_std=0, parameter_name=para_prefix + "_input_recurrent.b"), + inputs=inputs + [ + FullMatrixProjection( + out_memory, parameter_name=para_prefix + "_input_recurrent.w"), + ], ) LstmStepLayer( - name = name, - size = size, - bias = Bias(parameter_name = para_prefix + "_check.b"), - inputs = [name + "_" + "input_recurrent", state_memory], - active_type = active_type, - active_gate_type = gate_active_type, - active_state_type = state_active_type, - ) + name=name, + size=size, + bias=Bias(parameter_name=para_prefix + "_check.b"), + inputs=[name + "_" + "input_recurrent", state_memory], + active_type=active_type, + active_gate_type=gate_active_type, + active_state_type=state_active_type, ) GetOutputLayer( - name = name + "_" + "state", - size = size, - inputs = Input(name, input_layer_argument = "state"), - ) - -def LstmRecurrentUnitNaive(name, size, - active_type, state_active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - out_memory = None): - - if para_prefix is None: + name=name + "_" + "state", + size=size, + inputs=Input( + name, input_layer_argument="state"), ) + + +def LstmRecurrentUnitNaive(name, + size, + active_type, + state_active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + out_memory=None): + + if para_prefix is None: para_prefix = name if out_memory is None: - out_memory = Memory(name = name, size = size) + out_memory = Memory(name=name, size=size) + + state_memory = Memory(name=name + "_" + "state", size=size) - state_memory = Memory(name = name + "_" + "state", size = size) - Layer( - name = name + "_" + "input_recurrent", - type = "mixed", - size = size * 4, #(input_s, input_gate, forget_gate, output_gate) - error_clipping_threshold = error_clipping_threshold, - bias = Bias(initial_std = 0, - parameter_name = 
para_prefix + "_input_recurrent.b"), - inputs = inputs + [ - FullMatrixProjection(out_memory, - parameter_name = para_prefix + "_input_recurrent.w"), - ], - ) + name=name + "_" + "input_recurrent", + type="mixed", + size=size * 4, #(input_s, input_gate, forget_gate, output_gate) + error_clipping_threshold=error_clipping_threshold, + bias=Bias( + initial_std=0, parameter_name=para_prefix + "_input_recurrent.b"), + inputs=inputs + [ + FullMatrixProjection( + out_memory, parameter_name=para_prefix + "_input_recurrent.w"), + ], ) ExpressionLayer( - name = name + "_" + "input_s", - size = size, - active_type = active_type, - inputs = [IdentityOffsetProjection(name + "_" + "input_recurrent", offset=0)], - ) + name=name + "_" + "input_s", + size=size, + active_type=active_type, + inputs=[ + IdentityOffsetProjection( + name + "_" + "input_recurrent", offset=0) + ], ) ExpressionLayer( - name = name + "_" + "input_gate", - active_type = gate_active_type, - inputs = [IdentityOffsetProjection(name + "_" + "input_recurrent", offset=size), - DotMulProjection(state_memory, - parameter_name = para_prefix + "_input_check.w")], - ) + name=name + "_" + "input_gate", + active_type=gate_active_type, + inputs=[ + IdentityOffsetProjection( + name + "_" + "input_recurrent", offset=size), DotMulProjection( + state_memory, parameter_name=para_prefix + "_input_check.w") + ], ) ExpressionLayer( - name = name + "_" + "forget_gate", - active_type = gate_active_type, - inputs = [IdentityOffsetProjection(name + "_" + "input_recurrent", offset=size*2), - DotMulProjection(state_memory, - parameter_name = para_prefix + "_forget_check.w")], - ) + name=name + "_" + "forget_gate", + active_type=gate_active_type, + inputs=[ + IdentityOffsetProjection( + name + "_" + "input_recurrent", offset=size * 2), + DotMulProjection( + state_memory, parameter_name=para_prefix + "_forget_check.w") + ], ) ExpressionLayer( - name = name + "_" + "state", - inputs = [DotMulOperator([name + "_" + "input_s", - name + "_" + "input_gate"]), - DotMulOperator([state_memory, - name + "_" + "forget_gate"]), - ], - ) + name=name + "_" + "state", + inputs=[ + DotMulOperator([name + "_" + "input_s", name + "_" + "input_gate"]), + DotMulOperator([state_memory, name + "_" + "forget_gate"]), + ], ) ExpressionLayer( - name = name + "_" + "output_gate", - active_type = gate_active_type, - inputs = [IdentityOffsetProjection(name + "_" + "input_recurrent", offset=size*3), - DotMulProjection(name + "_" + "state", - parameter_name = para_prefix + "_output_check.w")], - ) + name=name + "_" + "output_gate", + active_type=gate_active_type, + inputs=[ + IdentityOffsetProjection( + name + "_" + "input_recurrent", offset=size * 3), + DotMulProjection( + name + "_" + "state", + parameter_name=para_prefix + "_output_check.w") + ], ) ExpressionLayer( - name = name + "_" + "state_atv", - active_type = state_active_type, - inputs = IdentityProjection(name + "_" + "state"), - ) + name=name + "_" + "state_atv", + active_type=state_active_type, + inputs=IdentityProjection(name + "_" + "state"), ) ExpressionLayer( - name = name, - inputs = DotMulOperator([name + "_" + "state_atv", - name + "_" + "output_gate"]), - ) + name=name, + inputs=DotMulOperator( + [name + "_" + "state_atv", name + "_" + "output_gate"]), ) + # like LstmRecurrentUnit, but it's a layer group. 
# it is equivalent to LstmLayer -def LstmRecurrentLayerGroup(name, size, - active_type, state_active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - seq_reversed = False): +def LstmRecurrentLayerGroup(name, + size, + active_type, + state_active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + seq_reversed=False): input_layer_name = name + "_" + "transform_input" Layer( - name = input_layer_name, - type = "mixed", - size = size * 4, - active_type = "", - bias = False, - inputs = inputs, - ) - - RecurrentLayerGroupBegin(name + "_layer_group", - in_links = [input_layer_name], - out_links = [name], - seq_reversed = seq_reversed) + name=input_layer_name, + type="mixed", + size=size * 4, + active_type="", + bias=False, + inputs=inputs, ) + + RecurrentLayerGroupBegin( + name + "_layer_group", + in_links=[input_layer_name], + out_links=[name], + seq_reversed=seq_reversed) LstmRecurrentUnit( - name = name, - size = size, - active_type = active_type, - state_active_type = state_active_type, - gate_active_type = gate_active_type, - inputs = [IdentityProjection(input_layer_name)], - para_prefix = para_prefix, - error_clipping_threshold = error_clipping_threshold, - ) + name=name, + size=size, + active_type=active_type, + state_active_type=state_active_type, + gate_active_type=gate_active_type, + inputs=[IdentityProjection(input_layer_name)], + para_prefix=para_prefix, + error_clipping_threshold=error_clipping_threshold, ) RecurrentLayerGroupEnd(name + "_layer_group") - # gated recurrent unit, can be used in recurrent machine # *inputs* should be a list of Projections, for example: # inputs = [FullMatrixProjection("input_layer_name")], @@ -184,142 +198,157 @@ def LstmRecurrentLayerGroup(name, size, # two GatedRecurrentUnit is same, they share same parameters # *out_memory* can be defined outside if it's used outside -def GatedRecurrentUnit(name, size, - active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - out_memory = None): - if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup + +def GatedRecurrentUnit(name, + size, + active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + out_memory=None): + if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup input_layer_name = inputs else: input_layer_name = name + "_" + "transform_input" Layer( - name = input_layer_name, - type = "mixed", - size = size * 3, - active_type = "", - bias = False, - inputs = inputs, - ) - - if para_prefix is None: + name=input_layer_name, + type="mixed", + size=size * 3, + active_type="", + bias=False, + inputs=inputs, ) + + if para_prefix is None: para_prefix = name if out_memory is None: - out_memory = Memory(name = name, size = size) + out_memory = Memory(name=name, size=size) GruStepLayer( - name = name, - size = size, - bias = Bias(parameter_name = para_prefix + "_gate.b"), - inputs = [input_layer_name, - Input(out_memory, parameter_name = para_prefix + "_gate.w")], - active_type = active_type, - active_gate_type = gate_active_type, - ) - -def GatedRecurrentUnitNaive(name, size, - active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - out_memory = None): - - if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup + name=name, + size=size, + bias=Bias(parameter_name=para_prefix + "_gate.b"), + inputs=[ + input_layer_name, Input( + out_memory, parameter_name=para_prefix + "_gate.w") + ], + 
active_type=active_type, + active_gate_type=gate_active_type, ) + + +def GatedRecurrentUnitNaive(name, + size, + active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + out_memory=None): + + if type_of(inputs) == str: #only used by GatedRecurrentLayerGroup input_layer_name = inputs else: input_layer_name = name + "_" + "transform_input" Layer( - name = input_layer_name, - type = "mixed", - size = size * 3, - active_type = "", - bias = False, - inputs = inputs, - ) - - if para_prefix is None: + name=input_layer_name, + type="mixed", + size=size * 3, + active_type="", + bias=False, + inputs=inputs, ) + + if para_prefix is None: para_prefix = name if out_memory is None: - out_memory = Memory(name = name, size = size) + out_memory = Memory(name=name, size=size) Layer( - name = name + "_" + "update_gate", - type = "mixed", - size = size, - active_type = gate_active_type, - error_clipping_threshold = error_clipping_threshold, - bias = Bias(initial_std = 0, parameter_name = para_prefix + "_update_gate.b"), - inputs = [IdentityOffsetProjection(input_layer_name, offset=0), - FullMatrixProjection(out_memory, - parameter_name = para_prefix + "_update_gate.w")], - ) + name=name + "_" + "update_gate", + type="mixed", + size=size, + active_type=gate_active_type, + error_clipping_threshold=error_clipping_threshold, + bias=Bias( + initial_std=0, parameter_name=para_prefix + "_update_gate.b"), + inputs=[ + IdentityOffsetProjection( + input_layer_name, offset=0), FullMatrixProjection( + out_memory, parameter_name=para_prefix + "_update_gate.w") + ], ) Layer( - name = name + "_" + "reset_gate", - type = "mixed", - size = size, - active_type = gate_active_type, - error_clipping_threshold = error_clipping_threshold, - bias = Bias(initial_std = 0, parameter_name = para_prefix + "_reset_gate.b"), - inputs = [IdentityOffsetProjection(input_layer_name, offset=size), - FullMatrixProjection(out_memory, - parameter_name = para_prefix + "_reset_gate.w")], - ) + name=name + "_" + "reset_gate", + type="mixed", + size=size, + active_type=gate_active_type, + error_clipping_threshold=error_clipping_threshold, + bias=Bias( + initial_std=0, parameter_name=para_prefix + "_reset_gate.b"), + inputs=[ + IdentityOffsetProjection( + input_layer_name, offset=size), FullMatrixProjection( + out_memory, parameter_name=para_prefix + "_reset_gate.w") + ], ) ExpressionLayer( - name = name + "_" + "reset_output", - inputs = DotMulOperator([out_memory, name + "_" + "reset_gate"]), - ) + name=name + "_" + "reset_output", + inputs=DotMulOperator([out_memory, name + "_" + "reset_gate"]), ) Layer( - name = name + "_" + "output_candidate", - type = "mixed", - size = size, - active_type = active_type, - error_clipping_threshold = error_clipping_threshold, - bias = Bias(initial_std = 0, parameter_name = para_prefix + "_output_candidate.b"), - inputs = [IdentityOffsetProjection(input_layer_name, offset=size*2), - FullMatrixProjection(name + "_" + "reset_output", - parameter_name = para_prefix + "_output_candidate.w")], - ) - ExpressionLayer( #element-wise interpolation - name = name, - inputs = [IdentityProjection(out_memory), - DotMulOperator([out_memory, - name + "_" + "update_gate"], scale=-1.0), - DotMulOperator([name + "_" + "output_candidate", - name + "_" + "update_gate"]), - ], - ) + name=name + "_" + "output_candidate", + type="mixed", + size=size, + active_type=active_type, + error_clipping_threshold=error_clipping_threshold, + bias=Bias( + initial_std=0, parameter_name=para_prefix + 
"_output_candidate.b"), + inputs=[ + IdentityOffsetProjection( + input_layer_name, offset=size * 2), FullMatrixProjection( + name + "_" + "reset_output", + parameter_name=para_prefix + "_output_candidate.w") + ], ) + ExpressionLayer( #element-wise interpolation + name=name, + inputs=[ + IdentityProjection(out_memory), + DotMulOperator( + [out_memory, name + "_" + "update_gate"], scale=-1.0), + DotMulOperator( + [name + "_" + "output_candidate", name + "_" + "update_gate"]), + ], ) + # like GatedRecurrentUnit, but it's a layer group. # it is equivalent to GatedRecurrentLayer. -def GatedRecurrentLayerGroup(name, size, - active_type, gate_active_type, - inputs, para_prefix = None, - error_clipping_threshold = 0, - seq_reversed = False): +def GatedRecurrentLayerGroup(name, + size, + active_type, + gate_active_type, + inputs, + para_prefix=None, + error_clipping_threshold=0, + seq_reversed=False): input_layer_name = name + "_" + "transform_input" Layer( - name = input_layer_name, - type = "mixed", - size = size * 3, - active_type = "", - bias = False, - inputs = inputs, - ) - - RecurrentLayerGroupBegin(name + "_layer_group", - in_links = [input_layer_name], - out_links = [name], - seq_reversed = seq_reversed) + name=input_layer_name, + type="mixed", + size=size * 3, + active_type="", + bias=False, + inputs=inputs, ) + + RecurrentLayerGroupBegin( + name + "_layer_group", + in_links=[input_layer_name], + out_links=[name], + seq_reversed=seq_reversed) GatedRecurrentUnit( - name = name, - size = size, - active_type = active_type, - gate_active_type = gate_active_type, - inputs = input_layer_name, #transform outside - para_prefix = para_prefix, - error_clipping_threshold = error_clipping_threshold, - ) + name=name, + size=size, + active_type=active_type, + gate_active_type=gate_active_type, + inputs=input_layer_name, #transform outside + para_prefix=para_prefix, + error_clipping_threshold=error_clipping_threshold, ) RecurrentLayerGroupEnd(name + "_layer_group") - diff --git a/python/paddle/trainer_config_helpers/activations.py b/python/paddle/trainer_config_helpers/activations.py index 2202d0bf96976..6261934e1bc8e 100644 --- a/python/paddle/trainer_config_helpers/activations.py +++ b/python/paddle/trainer_config_helpers/activations.py @@ -12,13 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-__all__ = ["TanhActivation", "SigmoidActivation", - "SoftmaxActivation", "IdentityActivation", "LinearActivation", - 'SequenceSoftmaxActivation', 'ExpActivation', - "ReluActivation", "BReluActivation", "SoftReluActivation", - "STanhActivation", - "AbsActivation", "SquareActivation", - "BaseActivation"] +__all__ = [ + "TanhActivation", "SigmoidActivation", "SoftmaxActivation", + "IdentityActivation", "LinearActivation", 'SequenceSoftmaxActivation', + 'ExpActivation', "ReluActivation", "BReluActivation", "SoftReluActivation", + "STanhActivation", "AbsActivation", "SquareActivation", "BaseActivation" +] class BaseActivation(object): @@ -51,7 +50,8 @@ class TanhActivation(BaseActivation): f(z)=tanh(z)=\\frac{e^z-e^{-z}}{e^z+e^{-z}} """ - def __init__(self): BaseActivation.__init__(self, 'tanh', True) + def __init__(self): + BaseActivation.__init__(self, 'tanh', True) class SigmoidActivation(BaseActivation): @@ -63,7 +63,8 @@ class SigmoidActivation(BaseActivation): f(z) = \\frac{1}{1+exp(-z)} """ - def __init__(self): BaseActivation.__init__(self, 'sigmoid', True) + def __init__(self): + BaseActivation.__init__(self, 'sigmoid', True) class SoftmaxActivation(BaseActivation): @@ -104,7 +105,8 @@ class IdentityActivation(BaseActivation): Just do nothing for output both forward/backward. """ - def __init__(self): BaseActivation.__init__(self, '', False) + def __init__(self): + BaseActivation.__init__(self, '', False) LinearActivation = IdentityActivation @@ -124,7 +126,8 @@ class ReluActivation(BaseActivation): 0 &\\quad\\mathrm{otherwize} """ - def __init__(self): BaseActivation.__init__(self, 'relu', True) + def __init__(self): + BaseActivation.__init__(self, 'relu', True) class BReluActivation(BaseActivation): @@ -141,7 +144,8 @@ class BReluActivation(BaseActivation): 0 &\\quad \\mathrm{otherwise} """ - def __init__(self): BaseActivation.__init__(self, 'brelu', False) + def __init__(self): + BaseActivation.__init__(self, 'brelu', False) class SoftReluActivation(BaseActivation): @@ -149,7 +153,9 @@ class SoftReluActivation(BaseActivation): SoftRelu Activation. """ - def __init__(self): BaseActivation.__init__(self, 'softrelu', False) + def __init__(self): + BaseActivation.__init__(self, 'softrelu', False) + class STanhActivation(BaseActivation): """ @@ -160,7 +166,8 @@ class STanhActivation(BaseActivation): f(z) = 1.7159 * tanh(2/3*z) """ - def __init__(self): BaseActivation.__init__(self, 'stanh', False) + def __init__(self): + BaseActivation.__init__(self, 'stanh', False) class AbsActivation(BaseActivation): @@ -178,7 +185,8 @@ class AbsActivation(BaseActivation): 0 &\\quad if \\quad z = 0 """ - def __init__(self): BaseActivation.__init__(self, 'abs', False) + def __init__(self): + BaseActivation.__init__(self, 'abs', False) class SquareActivation(BaseActivation): @@ -189,7 +197,9 @@ class SquareActivation(BaseActivation): f(z) = z^2. """ - def __init__(self): BaseActivation.__init__(self, 'square', False) + def __init__(self): + BaseActivation.__init__(self, 'square', False) + class ExpActivation(BaseActivation): """ @@ -198,7 +208,10 @@ class ExpActivation(BaseActivation): .. math:: f(z) = e^z. """ - def __init__(self): BaseActivation.__init__(self, 'exponential', False) + + def __init__(self): + BaseActivation.__init__(self, 'exponential', False) + class LogActivation(BaseActivation): """ @@ -207,4 +220,6 @@ class LogActivation(BaseActivation): .. 
math:: f(z) = log(z) """ - def __init__(self): BaseActivation.__init__(self, 'log', False) + + def __init__(self): + BaseActivation.__init__(self, 'log', False) diff --git a/python/paddle/trainer_config_helpers/attrs.py b/python/paddle/trainer_config_helpers/attrs.py index d263441247332..54169f382f164 100644 --- a/python/paddle/trainer_config_helpers/attrs.py +++ b/python/paddle/trainer_config_helpers/attrs.py @@ -13,8 +13,9 @@ # limitations under the License. from paddle.trainer.config_parser import * -__all__ = ['ParamAttr', 'ExtraAttr', 'ParameterAttribute', - 'ExtraLayerAttribute'] +__all__ = [ + 'ParamAttr', 'ExtraAttr', 'ParameterAttribute', 'ExtraLayerAttribute' +] def convert_and_compare(x, Type): @@ -25,7 +26,8 @@ def convert_and_compare(x, Type): :param Type: target type to check x over """ - return type(x)(Type(x))==x + return type(x)(Type(x)) == x + def is_compatible_with(x, Type): """ @@ -38,9 +40,9 @@ def is_compatible_with(x, Type): return True try: if float == Type or int == Type: - # avoid those types that can be converted to float/int but not very - # meaningful and could potentially lead to error - # i.e., str and bool typed value should not be used for initializing float/int variable + # avoid those types that can be converted to float/int but not very + # meaningful and could potentially lead to error + # i.e., str and bool typed value should not be used for initializing float/int variable if not isinstance(x, str) and not isinstance(x, bool): return convert_and_compare(x, Type) elif bool == Type: @@ -91,9 +93,17 @@ class ParameterAttribute(object): :type sparse_update: bool """ - def __init__(self, name=None, is_static=False, initial_std=None, - initial_mean=None, initial_max=None, initial_min=None, - l1_rate=None, l2_rate=None, learning_rate=None, momentum=None, + def __init__(self, + name=None, + is_static=False, + initial_std=None, + initial_mean=None, + initial_max=None, + initial_min=None, + l1_rate=None, + l2_rate=None, + learning_rate=None, + momentum=None, sparse_update=False): # initialize strategy. if is_static: @@ -183,7 +193,10 @@ class ExtraLayerAttribute(object): :type device: int """ - def __init__(self, error_clipping_threshold=None, drop_rate=None, device=None): + def __init__(self, + error_clipping_threshold=None, + drop_rate=None, + device=None): self.attr = dict() if isinstance(error_clipping_threshold, float): assert error_clipping_threshold > 0 @@ -200,8 +213,8 @@ def check(self, layer_name): for key in self.attr: if not hasattr(self, 'can_%s' % key) or \ not getattr(self, 'can_%s' % key): - raise NotImplementedError( - "Layer %s cannot support %s" % (layer_name, key)) + raise NotImplementedError("Layer %s cannot support %s" % + (layer_name, key)) @staticmethod def to_kwargs(attr): diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py index 283a45df30844..b41097953dad8 100644 --- a/python/paddle/trainer_config_helpers/data_sources.py +++ b/python/paddle/trainer_config_helpers/data_sources.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """ Data Sources are helpers to define paddle training data or testing data. 
""" @@ -26,8 +25,12 @@ __all__ = ['define_py_data_sources2'] -def define_py_data_source(file_list, cls, module, - obj, args=None, async=False, +def define_py_data_source(file_list, + cls, + module, + obj, + args=None, + async=False, data_cls=PyData): """ Define a python data source. @@ -76,8 +79,9 @@ def define_py_data_source(file_list, cls, module, args = pickle.dumps(args, 0) if data_cls is None: + def py_data2(files, load_data_module, load_data_object, load_data_args, - **kwargs): + **kwargs): data = DataBase() data.type = 'py2' data.files = files @@ -86,17 +90,25 @@ def py_data2(files, load_data_module, load_data_object, load_data_args, data.load_data_args = load_data_args data.async_load_data = True return data - data_cls = py_data2 - - cls(data_cls(files=file_list, - load_data_module=module, - load_data_object=obj, - load_data_args=args, - async_load_data=async)) + data_cls = py_data2 -def define_py_data_sources(train_list, test_list, module, obj, args=None, - train_async=False, data_cls=PyData): + cls( + data_cls( + files=file_list, + load_data_module=module, + load_data_object=obj, + load_data_args=args, + async_load_data=async)) + + +def define_py_data_sources(train_list, + test_list, + module, + obj, + args=None, + train_async=False, + data_cls=PyData): """ The annotation is almost the same as define_py_data_sources2, except that it can specific train_async and data_cls. @@ -125,8 +137,8 @@ def define_py_data_sources(train_list, test_list, module, obj, args=None, """ def __is_splitable__(o): - return (isinstance(o, list) or isinstance(o, tuple) - ) and hasattr(o, '__len__') and len(o) == 2 + return (isinstance(o, list) or + isinstance(o, tuple)) and hasattr(o, '__len__') and len(o) == 2 assert train_list is not None or test_list is not None assert module is not None and obj is not None @@ -196,9 +208,10 @@ def define_py_data_sources2(train_list, test_list, module, obj, args=None): :return: None :rtype: None """ - define_py_data_sources(train_list=train_list, - test_list=test_list, - module=module, - obj=obj, - args=args, - data_cls=None) + define_py_data_sources( + train_list=train_list, + test_list=test_list, + module=module, + obj=obj, + args=args, + data_cls=None) diff --git a/python/paddle/trainer_config_helpers/default_decorators.py b/python/paddle/trainer_config_helpers/default_decorators.py index be00f48b457c1..c01050e338d59 100644 --- a/python/paddle/trainer_config_helpers/default_decorators.py +++ b/python/paddle/trainer_config_helpers/default_decorators.py @@ -18,16 +18,18 @@ from .activations import TanhActivation from paddle.trainer.config_parser import * -__all__ = ['wrap_name_default', 'wrap_param_attr_default', - 'wrap_bias_attr_default', 'wrap_act_default', - 'wrap_param_default'] +__all__ = [ + 'wrap_name_default', 'wrap_param_attr_default', 'wrap_bias_attr_default', + 'wrap_act_default', 'wrap_param_default' +] def __default_not_set_callback__(kwargs, name): return name not in kwargs or kwargs[name] is None -def wrap_param_default(param_names=None, default_factory=None, +def wrap_param_default(param_names=None, + default_factory=None, not_set_callback=__default_not_set_callback__): assert param_names is not None assert isinstance(param_names, list) or isinstance(param_names, tuple) @@ -43,7 +45,8 @@ def __wrapper__(*args, **kwargs): if argspec.defaults: num_positional -= len(argspec.defaults) if not argspec.varargs and len(args) > num_positional: - logger.fatal("Must use keyword arguments for non-positional args") + logger.fatal( + "Must use keyword arguments 
for non-positional args") for name in param_names: if not_set_callback(kwargs, name): # Not set kwargs[name] = default_factory(func) @@ -112,13 +115,13 @@ def wrap_param_attr_default(param_names=None, default_factory=None): return wrap_param_default(param_names, default_factory) -def wrap_bias_attr_default(param_names=None, default_factory=None, +def wrap_bias_attr_default(param_names=None, + default_factory=None, has_bias=True): if param_names is None: param_names = ['bias_attr'] if default_factory is None: - default_factory = lambda _: ParamAttr(initial_std=0., - initial_mean=0.) + default_factory = lambda _: ParamAttr(initial_std=0., initial_mean=0.) def __bias_attr_not_set__(kwargs, name): if has_bias: diff --git a/python/paddle/trainer_config_helpers/evaluators.py b/python/paddle/trainer_config_helpers/evaluators.py index ded124a5c8ca4..dc6a36392f9c6 100644 --- a/python/paddle/trainer_config_helpers/evaluators.py +++ b/python/paddle/trainer_config_helpers/evaluators.py @@ -15,13 +15,14 @@ from paddle.trainer.config_parser import * from default_decorators import * -__all__ = ["evaluator_base","classification_error_evaluator", "auc_evaluator", - "pnpair_evaluator", "precision_recall_evaluator", - "ctc_error_evaluator", "chunk_evaluator", "sum_evaluator", - "column_sum_evaluator", "value_printer_evaluator", - "gradient_printer_evaluator", "maxid_printer_evaluator", - "maxframe_printer_evaluator", "seqtext_printer_evaluator", - "classification_error_printer_evaluator"] +__all__ = [ + "evaluator_base", "classification_error_evaluator", "auc_evaluator", + "pnpair_evaluator", "precision_recall_evaluator", "ctc_error_evaluator", + "chunk_evaluator", "sum_evaluator", "column_sum_evaluator", + "value_printer_evaluator", "gradient_printer_evaluator", + "maxid_printer_evaluator", "maxframe_printer_evaluator", + "seqtext_printer_evaluator", "classification_error_printer_evaluator" +] class EvaluatorAttribute(object): @@ -32,10 +33,7 @@ class EvaluatorAttribute(object): FOR_UTILS = 1 << 4 KEYS = [ - "for_classification", - "for_regression", - "for_rank", - "for_print", + "for_classification", "for_regression", "for_rank", "for_print", "for_utils" ] @@ -55,22 +53,23 @@ def impl(method): setattr(method, EvaluatorAttribute.to_key(attr), True) method.is_evaluator = True return method + return impl -def evaluator_base( - input, - type, - label=None, - weight=None, - name=None, - chunk_scheme=None, - num_chunk_types=None, - classification_threshold=None, - positive_label=None, - dict_file=None, - result_file=None, - num_results=None, - delimited=None): + +def evaluator_base(input, + type, + label=None, + weight=None, + name=None, + chunk_scheme=None, + num_chunk_types=None, + classification_threshold=None, + positive_label=None, + dict_file=None, + result_file=None, + num_results=None, + delimited=None): """ Evaluator will evaluate the network status while training/testing. @@ -130,14 +129,14 @@ def evaluator_base( result_file=result_file, delimited=delimited) + @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @wrap_name_default() -def classification_error_evaluator( - input, - label, - name=None, - weight=None, - threshold=None): +def classification_error_evaluator(input, + label, + name=None, + weight=None, + threshold=None): """ Classification Error Evaluator. It will print error rate for classification. @@ -170,13 +169,14 @@ def classification_error_evaluator( :return: None. 
""" - evaluator_base(name=name, - type="classification_error", - input=input, - label=label, - weight=weight, - classification_threshold=threshold, - ) + evaluator_base( + name=name, + type="classification_error", + input=input, + label=label, + weight=weight, + classification_threshold=threshold, ) + @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @wrap_name_default() @@ -184,8 +184,7 @@ def auc_evaluator( input, label, name=None, - weight=None, - ): + weight=None, ): """ Auc Evaluator which adapts to binary classification. @@ -205,11 +204,13 @@ def auc_evaluator( [sample_num, 1]. :type weight: LayerOutput """ - evaluator_base(name=name, - type="last-column-auc", - input=input, - label=label, - weight=weight) + evaluator_base( + name=name, + type="last-column-auc", + input=input, + label=label, + weight=weight) + @evaluator(EvaluatorAttribute.FOR_RANK) @wrap_name_default() @@ -218,8 +219,7 @@ def pnpair_evaluator( label, info, name=None, - weight=None, - ): + weight=None, ): """ Positive-negative pair rate Evaluator which adapts to rank task like learning to rank. This evaluator must contain at least three layers. @@ -242,12 +242,14 @@ def pnpair_evaluator( [sample_num, 1]. (TODO, explaination) :type weight: LayerOutput """ - evaluator_base(name=name, - type="pnpair", - input=input, - label=label, - info=info, - weight=weight) + evaluator_base( + name=name, + type="pnpair", + input=input, + label=label, + info=info, + weight=weight) + @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @wrap_name_default() @@ -256,8 +258,7 @@ def precision_recall_evaluator( label, positive_label=None, weight=None, - name=None, - ): + name=None, ): """ An Evaluator to calculate precision and recall, F1-score. It is adapt to the task with multiple labels. @@ -286,20 +287,21 @@ def precision_recall_evaluator( [sample_num, 1]. (TODO, explaination) :type weight: LayerOutput """ - evaluator_base(name=name, - type="precision_recall", - input=input, - label=label, - positive_label=positive_label, - weight=weight) + evaluator_base( + name=name, + type="precision_recall", + input=input, + label=label, + positive_label=positive_label, + weight=weight) + @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @wrap_name_default() def ctc_error_evaluator( input, label, - name=None, - ): + name=None, ): """ This evaluator is to calculate sequence-to-sequence edit distance. @@ -317,10 +319,9 @@ def ctc_error_evaluator( label for ctc_layer :type label: LayerOutput """ - evaluator_base(name=name, - type="ctc_edit_distance", - input=input, - label=label) + evaluator_base( + name=name, type="ctc_edit_distance", input=input, label=label) + @evaluator(EvaluatorAttribute.FOR_CLASSIFICATION) @wrap_name_default() @@ -328,8 +329,7 @@ def chunk_evaluator( input, name=None, chunk_scheme=None, - num_chunk_types=None, - ): + num_chunk_types=None, ): """ Chunk evaluator is used to evaluate segment labelling accuracy for a sequence. It calculates the chunk detection F1 score. @@ -375,19 +375,20 @@ def chunk_evaluator( :type chunk_scheme: basestring :param num_chunk_types: number of chunk types other than "other" """ - evaluator_base(name=name, - type="chunk", - input=input, - chunk_scheme=chunk_scheme, - num_chunk_types=num_chunk_types) + evaluator_base( + name=name, + type="chunk", + input=input, + chunk_scheme=chunk_scheme, + num_chunk_types=num_chunk_types) + @evaluator(EvaluatorAttribute.FOR_UTILS) @wrap_name_default() def sum_evaluator( input, name=None, - weight=None, - ): + weight=None, ): """ An Evaluator to sum the result of input. 
@@ -405,18 +406,15 @@ def sum_evaluator( [sample_num, 1]. (TODO, explaination) :type weight: LayerOutput """ - evaluator_base(name=name, - type="sum", - input=input, - weight=weight) + evaluator_base(name=name, type="sum", input=input, weight=weight) + @evaluator(EvaluatorAttribute.FOR_UTILS) @wrap_name_default() def column_sum_evaluator( input, name=None, - weight=None, - ): + weight=None, ): """ This Evaluator is used to sum the last column of input. @@ -431,22 +429,22 @@ def column_sum_evaluator( :param input: Input Layer name. :type input: LayerOutput """ - evaluator_base(name=name, - type="last-column-sum", - input=input, - weight=weight) + evaluator_base( + name=name, type="last-column-sum", input=input, weight=weight) + """ The following are printer Evaluators which are usually used to print the result, like value or gradient of input layers, the results generated in machine translation, the classification error etc. """ + + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() def value_printer_evaluator( input, - name=None, - ): + name=None, ): """ This Evaluator is used to print the values of input layers. It contains one or more input layers. @@ -462,16 +460,14 @@ def value_printer_evaluator( :param name: Evaluator name. :type name: None|basestring """ - evaluator_base(name=name, - type="value_printer", - input=input) + evaluator_base(name=name, type="value_printer", input=input) + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() def gradient_printer_evaluator( input, - name=None, - ): + name=None, ): """ This Evaluator is used to print the gradient of input layers. It contains one or more input layers. @@ -487,17 +483,15 @@ def gradient_printer_evaluator( :param name: Evaluator name. :type name: None|basestring """ - evaluator_base(name=name, - type="gradient_printer", - input=input) + evaluator_base(name=name, type="gradient_printer", input=input) + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() def maxid_printer_evaluator( input, num_results=None, - name=None, - ): + name=None, ): """ This Evaluator is used to print maximum top k values and their indexes of each row of input layers. It contains one or more input layers. @@ -517,18 +511,16 @@ def maxid_printer_evaluator( :param name: Evaluator name. :type name: None|basestring """ - evaluator_base(name=name, - type="max_id_printer", - input=input, - num_results=num_results) + evaluator_base( + name=name, type="max_id_printer", input=input, num_results=num_results) + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() def maxframe_printer_evaluator( input, num_results=None, - name=None, - ): + name=None, ): """ This Evaluator is used to print the top k frames of each input layers. The input layers should contain sequences info or sequences type. @@ -549,10 +541,12 @@ def maxframe_printer_evaluator( :param name: Evaluator name. :type name: None|basestring """ - evaluator_base(name=name, - type="max_frame_printer", - input=input, - num_results=num_results) + evaluator_base( + name=name, + type="max_frame_printer", + input=input, + num_results=num_results) + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() @@ -562,8 +556,7 @@ def seqtext_printer_evaluator( id_input=None, dict_file=None, delimited=None, - name=None, - ): + name=None, ): """ Sequence text printer will print text according to index matrix and a dictionary. 
There can be multiple input to this layer: @@ -636,12 +629,14 @@ def seqtext_printer_evaluator( inputs = [id_input, input] input.parents.append(id_input) - evaluator_base(name=name, - type="seq_text_printer", - input=inputs, - dict_file=dict_file, - result_file=result_file, - delimited=delimited) + evaluator_base( + name=name, + type="seq_text_printer", + input=inputs, + dict_file=dict_file, + result_file=result_file, + delimited=delimited) + @evaluator(EvaluatorAttribute.FOR_PRINT) @wrap_name_default() @@ -649,8 +644,7 @@ def classification_error_printer_evaluator( input, label, threshold=0.5, - name=None, - ): + name=None, ): """ This Evaluator is used to print the classification error of each sample. @@ -667,8 +661,9 @@ def classification_error_printer_evaluator( :param name: Evaluator name. :type name: None|basestring """ - evaluator_base(name=name, - type="classification_error_printer", - input=input, - label=label, - classification_threshold=threshold) + evaluator_base( + name=name, + type="classification_error_printer", + input=input, + label=label, + classification_threshold=threshold) diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index a0a367f2d50df..796121a64136e 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -29,36 +29,83 @@ import pickle import copy -__all__ = ["full_matrix_projection", "AggregateLevel", "ExpandLevel", - "identity_projection", "dotmul_projection", "dotmul_operator", - "repeat_layer", - "table_projection", "mixed_layer", "data_layer", - "embedding_layer", "fc_layer", "grumemory", - "pooling_layer", "lstmemory", "last_seq", "first_seq", - "cos_sim", "hsigmoid", "conv_projection", - "regression_cost", 'classification_cost', "LayerOutput", - 'img_conv_layer', 'img_pool_layer', 'batch_norm_layer', - 'img_cmrnorm_layer', 'addto_layer', - 'concat_layer', 'lstm_step_layer', 'recurrent_group', - 'memory', 'StaticInput', 'expand_layer', 'scaling_layer', - 'power_layer', 'interpolation_layer', 'bilinear_interp_layer', - 'trans_layer', 'sum_to_one_norm_layer', - 'get_output_layer', 'LayerType', 'context_projection', - 'beam_search', 'maxid_layer', 'GeneratedInput', 'SubsequenceInput', - 'gru_step_layer', 'recurrent_layer', - 'BaseGeneratedInput', 'conv_operator', 'conv_shift_layer', - 'tensor_layer', 'selective_fc_layer', 'sampling_id_layer', - 'slope_intercept_layer', 'trans_full_matrix_projection', - 'linear_comb_layer', - 'convex_comb_layer', 'ctc_layer', 'crf_layer', 'crf_decoding_layer', - 'nce_layer', - 'cross_entropy_with_selfnorm', 'cross_entropy', - 'multi_binary_label_cross_entropy', 'sum_cost', - 'rank_cost', 'lambda_cost', 'huber_cost', - 'block_expand_layer', - 'maxout_layer', 'out_prod_layer', 'print_layer', - 'spp_layer', - ] +__all__ = [ + "full_matrix_projection", + "AggregateLevel", + "ExpandLevel", + "identity_projection", + "dotmul_projection", + "dotmul_operator", + "repeat_layer", + "table_projection", + "mixed_layer", + "data_layer", + "embedding_layer", + "fc_layer", + "grumemory", + "pooling_layer", + "lstmemory", + "last_seq", + "first_seq", + "cos_sim", + "hsigmoid", + "conv_projection", + "regression_cost", + 'classification_cost', + "LayerOutput", + 'img_conv_layer', + 'img_pool_layer', + 'batch_norm_layer', + 'img_cmrnorm_layer', + 'addto_layer', + 'concat_layer', + 'lstm_step_layer', + 'recurrent_group', + 'memory', + 'StaticInput', + 'expand_layer', + 'scaling_layer', + 'power_layer', + 'interpolation_layer', + 
'bilinear_interp_layer', + 'trans_layer', + 'sum_to_one_norm_layer', + 'get_output_layer', + 'LayerType', + 'context_projection', + 'beam_search', + 'maxid_layer', + 'GeneratedInput', + 'SubsequenceInput', + 'gru_step_layer', + 'recurrent_layer', + 'BaseGeneratedInput', + 'conv_operator', + 'conv_shift_layer', + 'tensor_layer', + 'selective_fc_layer', + 'sampling_id_layer', + 'slope_intercept_layer', + 'trans_full_matrix_projection', + 'linear_comb_layer', + 'convex_comb_layer', + 'ctc_layer', + 'crf_layer', + 'crf_decoding_layer', + 'nce_layer', + 'cross_entropy_with_selfnorm', + 'cross_entropy', + 'multi_binary_label_cross_entropy', + 'sum_cost', + 'rank_cost', + 'lambda_cost', + 'huber_cost', + 'block_expand_layer', + 'maxout_layer', + 'out_prod_layer', + 'print_layer', + 'spp_layer', +] class LayerType(object): @@ -181,8 +228,15 @@ class LayerOutput(object): :type parents: list|tuple|collections.Sequence """ - def __init__(self, name, layer_type, parents=None, activation=None, - num_filters=None, img_norm_type=None, size=None, outputs=None, + def __init__(self, + name, + layer_type, + parents=None, + activation=None, + num_filters=None, + img_norm_type=None, + size=None, + outputs=None, reverse=None): assert isinstance(name, basestring) assert isinstance(layer_type, basestring) @@ -223,6 +277,7 @@ def __str__(self): def layer_support(*attrs): attrs_list = list(attrs) attrs_list.append(DEVICE) + def decorator(method): @functools.wraps(method) def wrapper(*args, **kwargs): @@ -282,9 +337,8 @@ def full_matrix_projection(input, size=0, param_attr=None): :return: A FullMatrixProjection Object. :rtype: FullMatrixProjection """ - proj = FullMatrixProjection(input_layer_name=input.name, - size=size, - **param_attr.attr) + proj = FullMatrixProjection( + input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj @@ -319,9 +373,8 @@ def trans_full_matrix_projection(input, size=0, param_attr=None): :return: A TransposedFullMatrixProjection Object. :rtype: TransposedFullMatrixProjection """ - proj = TransposedFullMatrixProjection(input_layer_name=input.name, - size=size, - **param_attr.attr) + proj = TransposedFullMatrixProjection( + input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj @@ -365,9 +418,8 @@ def table_projection(input, size=0, param_attr=None): :return: A TableProjection Object. :rtype: TableProjection """ - proj = TableProjection(input_layer_name=input.name, - size=size, - **param_attr.attr) + proj = TableProjection( + input_layer_name=input.name, size=size, **param_attr.attr) proj.origin = input return proj @@ -413,8 +465,8 @@ def identity_projection(input, offset=None): proj = IdentityProjection(input_layer_name=input.name) proj.origin = input else: - proj = IdentityOffsetProjection(input_layer_name=input.name, - offset=offset) + proj = IdentityOffsetProjection( + input_layer_name=input.name, offset=offset) proj.origin = input return proj @@ -443,9 +495,8 @@ def dotmul_projection(input, param_attr=None): :return: A DotMulProjection Object. :rtype: DotMulProjection """ - proj = DotMulProjection(input_layer_name=input.name, - size=input.size, - **param_attr.attr) + proj = DotMulProjection( + input_layer_name=input.name, size=input.size, **param_attr.attr) proj.origin = input return proj @@ -478,21 +529,22 @@ def dotmul_operator(a=None, b=None, scale=1, **kwargs): if 'x' in kwargs or 'y' in kwargs: logger.warning('x and y arguments for dotmul_operator is deprecated. 
' 'Please use a and b as parameter.') - a = kwargs.get('x', a) # For Backward capacity. + a = kwargs.get('x', a) # For Backward capacity. b = kwargs.get('y', b) assert isinstance(a, LayerOutput) assert isinstance(b, LayerOutput) if a.size is not None and b.size is not None: assert a.size == b.size - op = DotMulOperator(input_layer_names=[a.name, b.name], - scale=scale) + op = DotMulOperator(input_layer_names=[a.name, b.name], scale=scale) op.origin = [a, b] return op @wrap_bias_attr_default(['padding_attr']) -def context_projection(input, context_len, context_start=None, +def context_projection(input, + context_len, + context_start=None, padding_attr=False): """ Context Projection. @@ -529,11 +581,12 @@ def context_projection(input, context_len, context_start=None, if trainable: extra_dict = padding_attr.attr - proj = ContextProjection(input_layer_name=input.name, - context_length=context_len, - context_start=context_start, - trainable_padding=trainable, - **extra_dict) + proj = ContextProjection( + input_layer_name=input.name, + context_length=context_len, + context_start=context_start, + trainable_padding=trainable, + **extra_dict) proj.origin = input return proj @@ -547,8 +600,7 @@ class AddToSealedMixedLayerException(Exception): def __init__(self): Exception.__init__(self) - def __init__(self, name, size, act, bias_attr, layer_attr, - parents=None): + def __init__(self, name, size, act, bias_attr, layer_attr, parents=None): """ Ctor. :param name: layer name. @@ -565,8 +617,13 @@ def __init__(self, name, size, act, bias_attr, layer_attr, :param layer_attr: Extra Layer Attribute. :type layer_attr: ExtraLayerAttribute or None """ - LayerOutput.__init__(self, name, LayerType.MIXED_LAYER, parents, - size=size, activation=act) + LayerOutput.__init__( + self, + name, + LayerType.MIXED_LAYER, + parents, + size=size, + activation=act) self.bias_attr = bias_attr self.layer_attr = layer_attr self.inputs = [] @@ -604,8 +661,7 @@ def __exit__(self, *args, **kwargs): active_type=self.activation.name, bias=ParamAttr.to_bias(self.bias_attr), inputs=self.inputs, - **ExtraLayerAttribute.to_kwargs(self.layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(self.layer_attr)) # update the size which might be computed inside MixedLayer # according to the operator's output size self.size = ml.config.size @@ -615,7 +671,11 @@ def __exit__(self, *args, **kwargs): @wrap_act_default(act=LinearActivation()) @wrap_bias_attr_default(has_bias=False) @layer_support(ERROR_CLIPPING, DROPOUT) -def mixed_layer(size=0, input=None, name=None, act=None, bias_attr=False, +def mixed_layer(size=0, + input=None, + name=None, + act=None, + bias_attr=False, layer_attr=None): """ Mixed Layer. A mixed layer will add all inputs together, then activate. @@ -660,8 +720,12 @@ def mixed_layer(size=0, input=None, name=None, act=None, bias_attr=False, if input is None: return MixedLayerType(name, size, act, bias_attr, layer_attr) else: - with mixed_layer(name=name, size=size, act=act, bias_attr=bias_attr, - layer_attr=layer_attr) as m: + with mixed_layer( + name=name, + size=size, + act=act, + bias_attr=bias_attr, + layer_attr=layer_attr) as m: if isinstance(input, collections.Sequence): for each in input: m += each @@ -691,8 +755,11 @@ def data_layer(name, size, layer_attr=None): :return: LayerOutput object. 
:rtype: LayerOutput """ - Layer(type=LayerType.DATA, name=name, size=size, - **ExtraLayerAttribute.to_kwargs(layer_attr)) + Layer( + type=LayerType.DATA, + name=name, + size=size, + **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.DATA, size=size) @@ -718,9 +785,12 @@ def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): :return: LayerOutput object. :rtype: LayerOutput """ - with mixed_layer(name=name, size=size, act=LinearActivation(), - bias_attr=False, - layer_attr=layer_attr) as mix: + with mixed_layer( + name=name, + size=size, + act=LinearActivation(), + bias_attr=False, + layer_attr=layer_attr) as mix: mix += table_projection(input=input, size=size, param_attr=param_attr) return mix @@ -730,8 +800,13 @@ def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None): @wrap_bias_attr_default() @wrap_act_default() @layer_support(ERROR_CLIPPING, DROPOUT) -def fc_layer(input, size, act=None, name=None, - param_attr=None, bias_attr=None, layer_attr=None): +def fc_layer(input, + size, + act=None, + name=None, + param_attr=None, + bias_attr=None, + layer_attr=None): """ Helper for declare fully connected layer. @@ -783,17 +858,17 @@ def fc_layer(input, size, act=None, name=None, assert isinstance(input, collections.Sequence) Layer( - inputs=[Input(ipt.name, **attr.attr) for ipt, attr in zip( - input, param_attr)], + inputs=[ + Input(ipt.name, **attr.attr) for ipt, attr in zip(input, param_attr) + ], name=name, type=LayerType.FC_LAYER, size=size, bias=ParamAttr.to_bias(bias_attr), active_type=act.name, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.FC_LAYER, input, activation=act, - size=size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.FC_LAYER, input, activation=act, size=size) @wrap_name_default("print") @@ -816,8 +891,7 @@ def print_layer(input, name=None): Layer( name=name, type=LayerType.PRINT_LAYER, - inputs=[l.name for l in input], - ) + inputs=[l.name for l in input], ) # this layer don't return anything, can not be input of other layer. 
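# A minimal sketch combining the helpers above (data_layer, embedding_layer,
# fc_layer, print_layer) in a trainer config; the vocabulary size, layer names
# and dimensions are hypothetical.
from paddle.trainer_config_helpers import *

words = data_layer(name="word_ids", size=30000)   # integer ids, vocab of 30000
emb = embedding_layer(input=words, size=256)
hidden = fc_layer(input=emb, size=128, act=TanhActivation())
print_layer(input=[hidden])   # debugging aid: prints the values of `hidden`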
@@ -825,7 +899,10 @@ def print_layer(input, name=None): @wrap_bias_attr_default(has_bias=False) @wrap_param_default(['pooling_type'], default_factory=lambda _: MaxPooling()) @layer_support() -def pooling_layer(input, pooling_type=None, name=None, bias_attr=None, +def pooling_layer(input, + pooling_type=None, + name=None, + bias_attr=None, agg_level=AggregateLevel.EACH_TIMESTEP, layer_attr=None): """ @@ -872,24 +949,27 @@ def pooling_layer(input, pooling_type=None, name=None, bias_attr=None, inputs=[Input(input.name)], bias=ParamAttr.to_bias(bias_attr), trans_type=agg_level, - **extra_dict - ) - - return LayerOutput(name, pooling_type.name, parents=[input], - size=input.size) + **extra_dict) + return LayerOutput( + name, pooling_type.name, parents=[input], size=input.size) @wrap_bias_attr_default() @wrap_param_attr_default() -@wrap_act_default(param_names=['gate_act'], - act=SigmoidActivation()) +@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation()) @wrap_act_default(param_names=["act", 'state_act'], act=TanhActivation()) @wrap_name_default("lstmemory") @layer_support(DROPOUT) -def lstmemory(input, name=None, reverse=False, act=None, - gate_act=None, size=None, - state_act=None, bias_attr=None, param_attr=None, +def lstmemory(input, + name=None, + reverse=False, + act=None, + gate_act=None, + size=None, + state_act=None, + bias_attr=None, + param_attr=None, layer_attr=None): """ Long Short-term Memory Cell. @@ -964,30 +1044,38 @@ def lstmemory(input, name=None, reverse=False, act=None, "layer. The lstm size should be equal with input layer size/4. The" " size which is set explicitly will be ignored." % name) - Layer(name=name, - type=LayerType.LSTMEMORY, - active_type=act.name, - active_state_type=state_act.name, - active_gate_type=gate_act.name, - reversed=reverse, - bias=ParamAttr.to_bias(bias_attr), - inputs=[Input(input.name, **param_attr.attr)], - **ExtraLayerAttribute.to_kwargs(layer_attr)) + Layer( + name=name, + type=LayerType.LSTMEMORY, + active_type=act.name, + active_state_type=state_act.name, + active_gate_type=gate_act.name, + reversed=reverse, + bias=ParamAttr.to_bias(bias_attr), + inputs=[Input(input.name, **param_attr.attr)], + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.LSTMEMORY, [input], size=input.size / 4, - reverse=reverse) + return LayerOutput( + name, + LayerType.LSTMEMORY, [input], + size=input.size / 4, + reverse=reverse) @wrap_bias_attr_default() @wrap_param_attr_default() -@wrap_act_default(param_names=['gate_act'], - act=SigmoidActivation()) +@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation()) @wrap_act_default(param_names=["act"], act=TanhActivation()) @wrap_name_default("gru") @layer_support(DROPOUT) -def grumemory(input, name=None, reverse=False, act=None, - gate_act=None, size=None, - bias_attr=None, param_attr=None, +def grumemory(input, + name=None, + reverse=False, + act=None, + gate_act=None, + size=None, + bias_attr=None, + param_attr=None, layer_attr=None): """ Gate Recurrent Unit Layer. @@ -1078,23 +1166,28 @@ def grumemory(input, name=None, reverse=False, act=None, " and should be input size / 3. 
Set size explicitly will be " "ignored.") - Layer(name=name, - type=LayerType.GRUMEMORY, - active_type=act.name, - active_gate_type=gate_act.name, - reversed=reverse, - bias=ParamAttr.to_bias(bias_attr), - inputs=[Input(input.name, **param_attr.attr)], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + Layer( + name=name, + type=LayerType.GRUMEMORY, + active_type=act.name, + active_gate_type=gate_act.name, + reversed=reverse, + bias=ParamAttr.to_bias(bias_attr), + inputs=[Input(input.name, **param_attr.attr)], + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.GRUMEMORY, [input], size=input.size / 3, - reverse=reverse) + return LayerOutput( + name, + LayerType.GRUMEMORY, [input], + size=input.size / 3, + reverse=reverse) @wrap_name_default() @layer_support() -def last_seq(input, name=None, agg_level=AggregateLevel.EACH_TIMESTEP, +def last_seq(input, + name=None, + agg_level=AggregateLevel.EACH_TIMESTEP, layer_attr=None): """ Get Last Timestamp Activation of a sequence. @@ -1120,15 +1213,19 @@ def last_seq(input, name=None, agg_level=AggregateLevel.EACH_TIMESTEP, type=LayerType.SEQUENCE_LAST_INSTANCE, inputs=[input.name], trans_type=agg_level, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SEQUENCE_LAST_INSTANCE, parents=[input], - size=input.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.SEQUENCE_LAST_INSTANCE, + parents=[input], + size=input.size) @wrap_name_default() @layer_support() -def first_seq(input, name=None, agg_level=AggregateLevel.EACH_TIMESTEP, +def first_seq(input, + name=None, + agg_level=AggregateLevel.EACH_TIMESTEP, layer_attr=None): """ Get First Timestamp Activation of a sequence. @@ -1155,10 +1252,12 @@ def first_seq(input, name=None, agg_level=AggregateLevel.EACH_TIMESTEP, type=LayerType.SEQUENCE_FIRST_INSTANCE, inputs=[input.name], trans_type=agg_level, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SEQUENCE_FIRST_INSTANCE, - parents=[input], size=input.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.SEQUENCE_FIRST_INSTANCE, + parents=[input], + size=input.size) class ExpandLevel(object): @@ -1168,7 +1267,8 @@ class ExpandLevel(object): @wrap_name_default() @layer_support() -def expand_layer(input, expand_as, +def expand_layer(input, + expand_as, name=None, bias_attr=False, expand_level=ExpandLevel.FROM_TIMESTEP, @@ -1208,19 +1308,17 @@ def expand_layer(input, expand_as, bias=ParamAttr.to_bias(bias_attr=bias_attr), type=LayerType.EXPAND_LAYER, trans_type=expand_level, - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name=name, - size=input.size, - layer_type=LayerType.EXPAND_LAYER, - parents=[input, expand_as]) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + size=input.size, + layer_type=LayerType.EXPAND_LAYER, + parents=[input, expand_as]) @wrap_name_default() @layer_support() -def repeat_layer(input, num_repeats, - name=None, - layer_attr=None): +def repeat_layer(input, num_repeats, name=None, layer_attr=None): """ A layer for repeating the input for num_repeats times. This is equivalent to apply concat_layer() with num_repeats same input. 
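# A minimal sketch of the sequence helpers above; "sentence" is a hypothetical
# sequence-typed data layer (whether it is a sequence is decided by the data
# provider, not by this config).
from paddle.trainer_config_helpers import *

seq = data_layer(name="sentence", size=128)
last = last_seq(input=seq)                      # last time step of each sequence
tiled = repeat_layer(input=last, num_repeats=4)
expanded = expand_layer(input=last, expand_as=seq)  # broadcast back over the sequence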
@@ -1251,12 +1349,13 @@ def repeat_layer(input, num_repeats, name=name, num_filters=num_repeats, type=LayerType.FEATURE_MAP_EXPAND_LAYER, - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name=name, - size=l.config.size, - layer_type=LayerType.FEATURE_MAP_EXPAND_LAYER, - parents=[input]) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + size=l.config.size, + layer_type=LayerType.FEATURE_MAP_EXPAND_LAYER, + parents=[input]) + @wrap_name_default() @layer_support() @@ -1302,11 +1401,12 @@ def interpolation_layer(input, weight, name=None, layer_attr=None): name=name, type=LayerType.INTERPOLATION_LAYER, inputs=[weight.name, input[0].name, input[1].name], - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.INTERPOLATION_LAYER, - parents=[weight, input[0], input[1]], - size=input[0].size) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.INTERPOLATION_LAYER, + parents=[weight, input[0], input[1]], + size=input[0].size) @wrap_name_default() @@ -1345,15 +1445,23 @@ def bilinear_interp_layer(input, assert out_size_x > 0 and out_size_y > 0 assert input.num_filters is not None num_channels = input.num_filters - l = Layer(name=name, - inputs=Input(input.name, - bilinear_interp=BilinearInterp(out_size_x=out_size_x, - out_size_y=out_size_y, - num_channels=num_channels)), - type=LayerType.BILINEAR_INTERP_LAYER, - **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.BILINEAR_INTERP_LAYER, parents=[input], - num_filters=num_channels, size=l.config.size) + l = Layer( + name=name, + inputs=Input( + input.name, + bilinear_interp=BilinearInterp( + out_size_x=out_size_x, + out_size_y=out_size_y, + num_channels=num_channels)), + type=LayerType.BILINEAR_INTERP_LAYER, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.BILINEAR_INTERP_LAYER, + parents=[input], + num_filters=num_channels, + size=l.config.size) + @wrap_name_default() @layer_support() @@ -1392,10 +1500,9 @@ def power_layer(input, weight, name=None, layer_attr=None): name=name, type=LayerType.POWER_LAYER, inputs=[weight.name, input.name], - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.POWER_LAYER, - parents=[input, weight], size=input.size) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.POWER_LAYER, parents=[input, weight], size=input.size) @wrap_name_default() @@ -1437,10 +1544,9 @@ def scaling_layer(input, weight, name=None, layer_attr=None): name=name, type=LayerType.SCALING_LAYER, inputs=[weight.name, input.name], - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SCALING_LAYER, parents=[weight, input], - size=input.size) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.SCALING_LAYER, parents=[weight, input], size=input.size) @wrap_name_default() @@ -1473,10 +1579,9 @@ def trans_layer(input, name=None, layer_attr=None): name=name, type=LayerType.TRANS_LAYER, inputs=[input.name], - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.TRANS_LAYER, parents=[input], - size=input.size) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.TRANS_LAYER, parents=[input], size=input.size) @wrap_name_default() @@ -1518,8 +1623,7 @@ def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None): type=LayerType.COSINE_SIM, cos_scale=scale, inputs=[a.name, b.name], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + 
**ExtraLayerAttribute.to_kwargs(layer_attr)) else: if a.size is not None and b.size is not None: assert size == b.size / a.size @@ -1529,8 +1633,7 @@ def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None): size=size, cos_scale=scale, inputs=[a.name, b.name], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b], size=size) @@ -1538,8 +1641,13 @@ def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None): @wrap_bias_attr_default(has_bias=True) @wrap_param_attr_default() @layer_support() -def hsigmoid(input, label, num_classes, name=None, bias_attr=None, - param_attr=None, layer_attr=None): +def hsigmoid(input, + label, + num_classes, + name=None, + bias_attr=None, + param_attr=None, + layer_attr=None): """ Organize the classes into a binary tree. At each node, a sigmoid function is used to calculate the probability of belonging to the right branch. @@ -1600,10 +1708,9 @@ def hsigmoid(input, label, num_classes, name=None, bias_attr=None, num_classes=num_classes, bias=ParamAttr.to_bias(bias_attr), inputs=ipts_for_layer, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.HSIGMOID, parents=parents, - size=l.config.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.HSIGMOID, parents=parents, size=l.config.size) @wrap_name_default("conv") @@ -1611,11 +1718,22 @@ def hsigmoid(input, label, num_classes, name=None, bias_attr=None, @wrap_bias_attr_default() @wrap_act_default(act=ReluActivation()) @layer_support(DROPOUT) -def img_conv_layer(input, filter_size, num_filters, - name=None, num_channels=None, - act=None, groups=1, stride=1, padding=0, bias_attr=None, - param_attr=None, shared_biases=True, layer_attr=None, - filter_size_y=None, stride_y=None, padding_y=None, +def img_conv_layer(input, + filter_size, + num_filters, + name=None, + num_channels=None, + act=None, + groups=1, + stride=1, + padding=0, + bias_attr=None, + param_attr=None, + shared_biases=True, + layer_attr=None, + filter_size_y=None, + stride_y=None, + padding_y=None, trans=False): """ Convolution layer for image. Paddle only support square input currently and @@ -1713,40 +1831,56 @@ def img_conv_layer(input, filter_size, num_filters, if param_attr.attr.get('initial_smart'): # special initial for conv layers. 
- init_w = (2.0 / (filter_size ** 2 * num_channels)) ** 0.5 + init_w = (2.0 / (filter_size**2 * num_channels))**0.5 param_attr.attr["initial_mean"] = 0.0 param_attr.attr["initial_std"] = init_w param_attr.attr["initial_strategy"] = 0 param_attr.attr["initial_smart"] = False - + lt = LayerType.CONVTRANS_LAYER if trans else LayerType.CONV_LAYER - + l = Layer( name=name, - inputs=Input(input.name, conv=Conv( - filter_size=filter_size, padding=padding, stride=stride, - channels=num_channels, groups=groups, - filter_size_y=filter_size_y, padding_y=padding_y, - stride_y=stride_y), - **param_attr.attr), + inputs=Input( + input.name, + conv=Conv( + filter_size=filter_size, + padding=padding, + stride=stride, + channels=num_channels, + groups=groups, + filter_size_y=filter_size_y, + padding_y=padding_y, + stride_y=stride_y), + **param_attr.attr), active_type=act.name, num_filters=num_filters, bias=ParamAttr.to_bias(bias_attr), shared_biases=shared_biases, type=lt, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, lt, parents=[input], - activation=act, num_filters=num_filters, - size=l.config.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + lt, + parents=[input], + activation=act, + num_filters=num_filters, + size=l.config.size) @wrap_name_default("pool") @layer_support() -def img_pool_layer(input, pool_size, name=None, - num_channels=None, pool_type=None, - stride=1, padding=0, layer_attr=None, - pool_size_y=None, stride_y=None, padding_y=None, +def img_pool_layer(input, + pool_size, + name=None, + num_channels=None, + pool_type=None, + stride=1, + padding=0, + layer_attr=None, + pool_size_y=None, + stride_y=None, + padding_y=None, img_width=None): """ Image pooling Layer. @@ -1804,29 +1938,39 @@ def img_pool_layer(input, pool_size, name=None, l = Layer( name=name, type=LayerType.POOL_LAYER, - inputs=[Input(input.name, - pool=Pool( - pool_type=type_name, - channels=num_channels, - size_x=pool_size, - start=None, - stride=stride, - padding=padding, - size_y=pool_size_y, - stride_y=stride_y, - padding_y=padding_y, - img_width=img_width - ))], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.POOL_LAYER, parents=[input], - num_filters=num_channels, size=l.config.size) + inputs=[ + Input( + input.name, + pool=Pool( + pool_type=type_name, + channels=num_channels, + size_x=pool_size, + start=None, + stride=stride, + padding=padding, + size_y=pool_size_y, + stride_y=stride_y, + padding_y=padding_y, + img_width=img_width)) + ], + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.POOL_LAYER, + parents=[input], + num_filters=num_channels, + size=l.config.size) @wrap_name_default("spp") @layer_support() -def spp_layer(input, name=None, num_channels=None, pool_type=None, - pyramid_height=None, img_width=None, layer_attr=None): +def spp_layer(input, + name=None, + num_channels=None, + pool_type=None, + pyramid_height=None, + img_width=None, + layer_attr=None): """ Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. 
The details please refer to @@ -1866,42 +2010,58 @@ def spp_layer(input, name=None, num_channels=None, pool_type=None, l = Layer( name=name, type=LayerType.SPP_LAYER, - inputs=Input(input.name, - spp=SpatialPyramidPool(pool_type=type_name, - channels=num_channels, - pyramid_height=pyramid_height, - img_width=img_width) - ), - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, layer_type=LayerType.SPP_LAYER, parents=[input], - num_filters=num_channels, size=l.config.size) - - -def __img_norm_layer__(name, input, size, norm_type, scale, power, - num_channels, blocked, layer_attr): + inputs=Input( + input.name, + spp=SpatialPyramidPool( + pool_type=type_name, + channels=num_channels, + pyramid_height=pyramid_height, + img_width=img_width)), + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + layer_type=LayerType.SPP_LAYER, + parents=[input], + num_filters=num_channels, + size=l.config.size) + + +def __img_norm_layer__(name, input, size, norm_type, scale, power, num_channels, + blocked, layer_attr): if num_channels is None: assert input.num_filters is not None num_channels = input.num_filters l = Layer( - name=name, type=LayerType.NORM_LAYER, inputs=Input( - input.name, norm=Norm(norm_type=norm_type, - channels=num_channels, size=size, - scale=scale, - pow=power, blocked=blocked) - ), - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, layer_type=LayerType.NORM_LAYER, parents=[input], - num_filters=num_channels, img_norm_type=norm_type, - size=l.config.size) + name=name, + type=LayerType.NORM_LAYER, + inputs=Input( + input.name, + norm=Norm( + norm_type=norm_type, + channels=num_channels, + size=size, + scale=scale, + pow=power, + blocked=blocked)), + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + layer_type=LayerType.NORM_LAYER, + parents=[input], + num_filters=num_channels, + img_norm_type=norm_type, + size=l.config.size) @wrap_name_default("crmnorm") @layer_support() -def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75, - name=None, num_channels=None, +def img_cmrnorm_layer(input, + size, + scale=0.0128, + power=0.75, + name=None, + num_channels=None, layer_attr=None): """ Response normalization across feature maps. 
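# A minimal sketch of the image helpers above; the 28x28 single-channel input
# and the layer names are hypothetical.
from paddle.trainer_config_helpers import *

img = data_layer(name="image", size=28 * 28)
conv = img_conv_layer(
    input=img,
    filter_size=5,
    num_filters=32,
    num_channels=1,
    stride=1,
    padding=2,
    act=ReluActivation())
pool = img_pool_layer(input=conv, pool_size=2, stride=2, pool_type=MaxPooling())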
@@ -1935,8 +2095,13 @@ def img_cmrnorm_layer(input, size, scale=0.0128, power=0.75, @wrap_act_default(act=ReluActivation()) @wrap_name_default("batch_norm") @layer_support(DROPOUT) -def batch_norm_layer(input, act=None, name=None, num_channels=None, - bias_attr=None, param_attr=None, layer_attr=None, +def batch_norm_layer(input, + act=None, + name=None, + num_channels=None, + bias_attr=None, + param_attr=None, + layer_attr=None, batch_norm_type=None, moving_average_fraction=0.9, use_global_stats=None): @@ -2022,22 +2187,23 @@ def batch_norm_layer(input, act=None, name=None, num_channels=None, (batch_norm_type == "cudnn_batch_norm") l = Layer( name=name, - inputs=Input(input.name, - image=Image(channels=num_channels), - **param_attr.attr), + inputs=Input( + input.name, image=Image(channels=num_channels), **param_attr.attr), active_type=act.name, type=LayerType.BATCH_NORM_LAYER, batch_norm_type=batch_norm_type, bias=ParamAttr.to_bias(bias_attr), moving_average_fraction=moving_average_fraction, use_global_stats=use_global_stats, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, layer_type=LayerType.BATCH_NORM_LAYER, - parents=[input], activation=act, - num_filters=num_channels, - size=l.config.size) + return LayerOutput( + name=name, + layer_type=LayerType.BATCH_NORM_LAYER, + parents=[input], + activation=act, + num_filters=num_channels, + size=l.config.size) @wrap_name_default() @@ -2072,18 +2238,16 @@ def sum_to_one_norm_layer(input, name=None, layer_attr=None): name=name, type=LayerType.SUM_TO_ONE_NORM_LAYER, inputs=[input.name], - **ExtraAttr.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SUM_TO_ONE_NORM_LAYER, parents=[input], - size=input.size) + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.SUM_TO_ONE_NORM_LAYER, parents=[input], size=input.size) @wrap_name_default("addto") @wrap_act_default(act=LinearActivation()) @wrap_bias_attr_default(has_bias=False) @layer_support(DROPOUT) -def addto_layer(input, act=None, name=None, bias_attr=None, - layer_attr=None): +def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None): """ AddtoLayer. 
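# A minimal sketch of batch_norm_layer and addto_layer from the hunks above,
# using two hypothetical convolution branches of identical shape.
from paddle.trainer_config_helpers import *

img = data_layer(name="image", size=32 * 32 * 3)
branch_a = img_conv_layer(input=img, filter_size=3, num_filters=16,
                          num_channels=3, padding=1, act=LinearActivation())
branch_b = img_conv_layer(input=img, filter_size=3, num_filters=16,
                          num_channels=3, padding=1, act=LinearActivation())
bn = batch_norm_layer(input=branch_a, act=ReluActivation())
merged = addto_layer(input=[bn, branch_b], act=ReluActivation())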
@@ -2143,15 +2307,20 @@ def addto_layer(input, act=None, name=None, bias_attr=None, num_filters = each_input.num_filters l = Layer( - name=name, type=LayerType.ADDTO_LAYER, inputs=ipts_for_layer, + name=name, + type=LayerType.ADDTO_LAYER, + inputs=ipts_for_layer, bias=ParamAttr.to_bias(bias_attr), active_type=act.name, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.ADDTO_LAYER, parents=input, - activation=act, num_filters=num_filters, - size=l.config.size) + return LayerOutput( + name, + LayerType.ADDTO_LAYER, + parents=input, + activation=act, + num_filters=num_filters, + size=l.config.size) @wrap_act_default(act=IdentityActivation()) @@ -2210,22 +2379,22 @@ def __reduce_concat_type__(a, b): LayerOutput) return a - is_concat_layer = __is_type__(reduce(__reduce_concat_type__, - map(type, input)), LayerOutput) + is_concat_layer = __is_type__( + reduce(__reduce_concat_type__, map(type, input)), LayerOutput) - layer_type = (LayerType.CONCAT_LAYER if is_concat_layer - else LayerType.CONCAT_PROJ_LAYER) + layer_type = (LayerType.CONCAT_LAYER + if is_concat_layer else LayerType.CONCAT_PROJ_LAYER) if layer_type == LayerType.CONCAT_LAYER: assert not bias_attr Layer( - name=name, type=layer_type, + name=name, + type=layer_type, inputs=[x.name for x in input] if is_concat_layer else input, active_type=act.name, bias=ParamAttr.to_bias(bias_attr), - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) sz = 0 for each_input in input: @@ -2235,14 +2404,20 @@ def __reduce_concat_type__(a, b): sz = None break - return LayerOutput(name, layer_type=layer_type, - parents=input if is_concat_layer else [ - x.origin for x in input], - activation=act, size=sz) - - -def memory(name, size, is_seq=False, boot_layer=None, - boot_bias=None, boot_bias_active_type=None, + return LayerOutput( + name, + layer_type=layer_type, + parents=input if is_concat_layer else [x.origin for x in input], + activation=act, + size=sz) + + +def memory(name, + size, + is_seq=False, + boot_layer=None, + boot_bias=None, + boot_bias_active_type=None, boot_with_const_id=None): """ The memory layers is a layer cross each time step. 
Reference this output @@ -2290,30 +2465,33 @@ def memory(name, size, is_seq=False, boot_layer=None, assert boot_layer is None or isinstance(boot_layer, LayerOutput) - agent_name = Memory(name, size, - is_seq, - boot_layer.name if boot_layer is not None else None, - boot_bias, - boot_bias_active_type.name, - boot_with_const_id) - - lout = LayerOutput(name=agent_name, size=size, - layer_type=LayerType.MEMORY, - parents=[boot_layer] if boot_layer is not None - else None) + agent_name = Memory(name, size, is_seq, boot_layer.name + if boot_layer is not None else None, boot_bias, + boot_bias_active_type.name, boot_with_const_id) + + lout = LayerOutput( + name=agent_name, + size=size, + layer_type=LayerType.MEMORY, + parents=[boot_layer] if boot_layer is not None else None) return lout @wrap_bias_attr_default() -@wrap_act_default(param_names=['gate_act', - 'state_act'], - act=SigmoidActivation()) +@wrap_act_default( + param_names=['gate_act', 'state_act'], act=SigmoidActivation()) @wrap_act_default(act=TanhActivation()) @wrap_name_default('lstm_step') @layer_support() -def lstm_step_layer(input, state, size, act=None, - name=None, gate_act=None, state_act=None, - bias_attr=None, layer_attr=None): +def lstm_step_layer(input, + state, + size, + act=None, + name=None, + gate_act=None, + state_act=None, + bias_attr=None, + layer_attr=None): """ LSTM Step Layer. It used in recurrent_group. The lstm equations are shown as follow. @@ -2380,24 +2558,32 @@ def lstm_step_layer(input, state, size, act=None, active_gate_type=gate_act.name, active_state_type=state_act.name, bias=ParamAttr.to_bias(bias_attr), - size=size, inputs=[input.name, state.name], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + size=size, + inputs=[input.name, state.name], + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, layer_type=LayerType.LSTM_STEP_LAYER, - parents=[input, state], activation=act, - size=size, outputs=['default', 'state']) + return LayerOutput( + name=name, + layer_type=LayerType.LSTM_STEP_LAYER, + parents=[input, state], + activation=act, + size=size, + outputs=['default', 'state']) @wrap_bias_attr_default() -@wrap_act_default(param_names=['gate_act'], - act=SigmoidActivation()) +@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation()) @wrap_act_default(act=TanhActivation()) @wrap_name_default('gru_step') @layer_support() -def gru_step_layer(input, output_mem, size=None, act=None, - name=None, gate_act=None, - bias_attr=None, layer_attr=None): +def gru_step_layer(input, + output_mem, + size=None, + act=None, + name=None, + gate_act=None, + bias_attr=None, + layer_attr=None): """ :param input: @@ -2418,20 +2604,18 @@ def gru_step_layer(input, output_mem, size=None, act=None, Layer( name=name, type=LayerType.GRU_STEP_LAYER, - inputs=[ - input.name, - output_mem.name - ], + inputs=[input.name, output_mem.name], bias=ParamAttr.to_bias(bias_attr), size=size, active_type=act.name, active_gate_type=gate_act.name, - **ExtraAttr.to_kwargs(layer_attr) - ) + **ExtraAttr.to_kwargs(layer_attr)) return LayerOutput( - name=name, layer_type=LayerType.GRU_STEP_LAYER, + name=name, + layer_type=LayerType.GRU_STEP_LAYER, parents=[input, output_mem], - size=size, activation=act) + size=size, + activation=act) @wrap_name_default() @@ -2459,13 +2643,19 @@ def get_output_layer(input, arg_name, name=None, layer_attr=None): ' The get output name is %s, which not' \ ' in %s' % ( arg_name, ",".join(input.outputs)) - Layer(name=name, type=LayerType.GET_OUTPUT_LAYER, - inputs=[Input(input.name, 
input_layer_argument=arg_name)], - size=input.size, - **ExtraLayerAttribute.to_kwargs(layer_attr)) + Layer( + name=name, + type=LayerType.GET_OUTPUT_LAYER, + inputs=[Input( + input.name, input_layer_argument=arg_name)], + size=input.size, + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, layer_type=LayerType.GET_OUTPUT_LAYER, - parents=[input], size=input.size) + return LayerOutput( + name=name, + layer_type=LayerType.GET_OUTPUT_LAYER, + parents=[input], + size=input.size) @wrap_name_default() @@ -2473,8 +2663,13 @@ def get_output_layer(input, arg_name, name=None, layer_attr=None): @wrap_bias_attr_default() @wrap_param_attr_default() @layer_support() -def recurrent_layer(input, act=None, bias_attr=None, - param_attr=None, name=None, reverse=False, layer_attr=None): +def recurrent_layer(input, + act=None, + bias_attr=None, + param_attr=None, + name=None, + reverse=False, + layer_attr=None): """ Simple recurrent unit layer. It is just a fully connect layer through both time and neural network. @@ -2509,16 +2704,21 @@ def recurrent_layer(input, act=None, bias_attr=None, :return: LayerOutput object. :rtype: LayerOutput """ - Layer(name=name, - type=LayerType.RECURRENT_LAYER, - inputs=Input(input.name, **param_attr.attr), - active_type=act.name, - bias=ParamAttr.to_bias(bias_attr), - reversed=reverse, - **ExtraAttr.to_kwargs(layer_attr)) - return LayerOutput(name=name, layer_type=LayerType.RECURRENT_LAYER, - parents=[input], size=input.size, activation=act, - reverse=reverse) + Layer( + name=name, + type=LayerType.RECURRENT_LAYER, + inputs=Input(input.name, **param_attr.attr), + active_type=act.name, + bias=ParamAttr.to_bias(bias_attr), + reversed=reverse, + **ExtraAttr.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + layer_type=LayerType.RECURRENT_LAYER, + parents=[input], + size=input.size, + activation=act, + reverse=reverse) class StaticInput(object): @@ -2646,7 +2846,7 @@ def targetInlink_in_inlinks(): return True return False - assert(targetInlink == None or targetInlink_in_inlinks()) + assert (targetInlink == None or targetInlink_in_inlinks()) targetInlinkName = None if targetInlink == None \ else targetInlink.name if isinstance(targetInlink, LayerOutput) \ else targetInlink.input.name @@ -2661,7 +2861,8 @@ def map_in_links(x): return x.name RecurrentLayerGroupWithoutOutLinksBegin( - name=name, in_links=map(map_in_links, in_links), + name=name, + in_links=map(map_in_links, in_links), seq_reversed=reverse, target_inlinkname=targetInlinkName) in_args = [] @@ -2673,12 +2874,15 @@ def map_in_links(x): in_args.append(each_input.input) else: mem_name = "__%s_memory__" % each_input.input.name - mem = memory(name=mem_name, - is_seq=each_input.is_seq, - size=each_input.input.size, - boot_layer=each_input.input) - with mixed_layer(name=mem_name, size=each_input.input.size, - act=IdentityActivation()) as mix: + mem = memory( + name=mem_name, + is_seq=each_input.is_seq, + size=each_input.input.size, + boot_layer=each_input.input) + with mixed_layer( + name=mem_name, + size=each_input.input.size, + act=IdentityActivation()) as mix: mix += identity_projection(mem) in_args.append(mem) @@ -2720,14 +2924,15 @@ def after_real_step(self, input): return maxid_layer(input=input, name='__beam_search_predict__') def before_real_step(self): - predict_id = memory(name='__beam_search_predict__', - size=self.size, - boot_with_const_id=self.bos_id) - - trg_emb = embedding_layer(input=predict_id, - size=self.embedding_size, - param_attr=ParamAttr( - name=self.embedding_name)) 
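(Editorial aside: the :code:`recurrent_layer` whose signature is re-wrapped above is a plain fully connected recurrence over time; a tiny sketch follows. The word dictionary size, embedding size, and the :code:`data_layer` input are assumptions, not part of this patch.)

.. code-block:: python

    from paddle.trainer_config_helpers import *

    words = data_layer(name='word', size=10000)        # assumed vocabulary size
    emb = embedding_layer(input=words, size=256)
    # forward and backward simple recurrences over the embedded sequence
    rnn_fwd = recurrent_layer(input=emb, act=TanhActivation())
    rnn_bwd = recurrent_layer(input=emb, act=TanhActivation(), reverse=True)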
+ predict_id = memory( + name='__beam_search_predict__', + size=self.size, + boot_with_const_id=self.bos_id) + + trg_emb = embedding_layer( + input=predict_id, + size=self.embedding_size, + param_attr=ParamAttr(name=self.embedding_name)) return trg_emb def __init__(self, size, embedding_name, embedding_size): @@ -2760,14 +2965,16 @@ def maxid_layer(input, name=None, layer_attr=None): """ assert isinstance(input, LayerOutput) - l = Layer(name=name, - type='maxid', - inputs=[input.name], - **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, - layer_type=LayerType.MAXID_LAYER, - parents=[input], - size=l.config.size) + l = Layer( + name=name, + type='maxid', + inputs=[input.name], + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + layer_type=LayerType.MAXID_LAYER, + parents=[input], + size=l.config.size) @wrap_name_default() @@ -2796,14 +3003,16 @@ def out_prod_layer(input1, input2, name=None, layer_attr=None): assert isinstance(input1, LayerOutput) assert isinstance(input2, LayerOutput) - l = Layer(name=name, - type=LayerType.OUT_PROD_LAYER, - inputs=[input1.name, input2.name], - **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, - layer_type=LayerType.OUT_PROD_LAYER, - parents=[input1, input2], - size=l.config.size) + l = Layer( + name=name, + type=LayerType.OUT_PROD_LAYER, + inputs=[input1.name, input2.name], + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + layer_type=LayerType.OUT_PROD_LAYER, + parents=[input1, input2], + size=l.config.size) @wrap_name_default() @@ -2832,19 +3041,27 @@ def eos_layer(input, eos_id, name=None, layer_attr=None): :return: LayerOutput object. :rtype: LayerOutput """ - l = Layer(name=name, - type=LayerType.EOSID_LAYER, - eos_id=eos_id, - inputs=[input.name], - **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name=name, layer_type=LayerType.EOSID_LAYER, - parents=[input], - size=l.config.size) + l = Layer( + name=name, + type=LayerType.EOSID_LAYER, + eos_id=eos_id, + inputs=[input.name], + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name=name, + layer_type=LayerType.EOSID_LAYER, + parents=[input], + size=l.config.size) @wrap_name_default() -def beam_search(step, input, bos_id, eos_id, beam_size, - max_length=500, name=None, +def beam_search(step, + input, + bos_id, + eos_id, + beam_size, + max_length=500, + name=None, num_results_per_sample=None): """ Beam search is a heuristic search algorithm used in sequence generation. 
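(Editorial aside: to keep the reformatted decoding helpers readable in context, a brief sketch of :code:`maxid_layer` and :code:`eos_layer`. The upstream classifier, its sizes, and the end-of-sequence id 0 are assumed values.)

.. code-block:: python

    from paddle.trainer_config_helpers import *

    feature = data_layer(name='feature', size=512)
    prob = fc_layer(input=feature, size=1000, act=SoftmaxActivation())
    best_id = maxid_layer(input=prob)            # index of the largest probability
    is_eos = eos_layer(input=best_id, eos_id=0)  # marks steps where that index == eos_id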
@@ -2918,8 +3135,7 @@ def rnn_step(input): if num_results_per_sample > beam_size: logger.warning("num_results_per_sample should be less than beam_size") - if isinstance(input, StaticInput) or isinstance(input, - BaseGeneratedInput): + if isinstance(input, StaticInput) or isinstance(input, BaseGeneratedInput): input = [input] generated_input_index = -1 @@ -2944,11 +3160,12 @@ def rnn_step(input): def __real_step__(*args): eos_name = "__%s_eos_layer__" % name - RecurrentLayerGroupSetGenerator(Generator( - eos_layer_name=eos_name, - max_num_frames=max_length, - beam_size=beam_size, - num_results_per_sample=num_results_per_sample)) + RecurrentLayerGroupSetGenerator( + Generator( + eos_layer_name=eos_name, + max_num_frames=max_length, + beam_size=beam_size, + num_results_per_sample=num_results_per_sample)) args = list(args) args.insert(generated_input_index, gipt.before_real_step()) @@ -2959,11 +3176,12 @@ def __real_step__(*args): return predict - tmp = recurrent_group(step=__real_step__, input=real_input, reverse=False, - name=name) + tmp = recurrent_group( + step=__real_step__, input=real_input, reverse=False, name=name) return tmp + def __cost_input__(input, label, weight=None): """ inputs and parents for cost layers. @@ -2979,8 +3197,7 @@ def __cost_input__(input, label, weight=None): @wrap_name_default() @layer_support() -def regression_cost(input, label, weight=None, name=None, - layer_attr=None): +def regression_cost(input, label, weight=None, name=None, layer_attr=None): """ Regression Layer. @@ -3002,14 +3219,20 @@ def regression_cost(input, label, weight=None, name=None, """ ipts, parents = __cost_input__(input, label, weight) - Layer(inputs=ipts, type="square_error", name=name, - **ExtraLayerAttribute.to_kwargs(layer_attr)) + Layer( + inputs=ipts, + type="square_error", + name=name, + **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.COST, parents=parents, size=1) @wrap_name_default("cost") @layer_support() -def classification_cost(input, label, weight=None, name=None, +def classification_cost(input, + label, + weight=None, + name=None, evaluator=classification_error_evaluator, layer_attr=None): """ @@ -3036,8 +3259,11 @@ def classification_cost(input, label, weight=None, name=None, ipts, parents = __cost_input__(input, label, weight) - Layer(name=name, type="multi-class-cross-entropy", inputs=ipts, - **ExtraLayerAttribute.to_kwargs(layer_attr)) + Layer( + name=name, + type="multi-class-cross-entropy", + inputs=ipts, + **ExtraLayerAttribute.to_kwargs(layer_attr)) def __add_evaluator__(e): assert callable(e) @@ -3059,9 +3285,16 @@ def __add_evaluator__(e): return LayerOutput(name, LayerType.COST, parents=parents, size=1) -def conv_operator(img, filter, filter_size, num_filters, - num_channels=None, stride=1, padding=0, - filter_size_y=None, stride_y=None, padding_y=None): +def conv_operator(img, + filter, + filter_size, + num_filters, + num_channels=None, + stride=1, + padding=0, + filter_size_y=None, + stride_y=None, + padding_y=None): """ Different from img_conv_layer, conv_op is an Operator, which can be used in mixed_layer. And conv_op takes two inputs to perform convolution. 
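(Editorial aside: as context for the cost-layer hunks above, a minimal sketch wiring a prediction, a label, and an optional per-sample weight into :code:`classification_cost`. Every layer name and size below is an assumption for illustration.)

.. code-block:: python

    from paddle.trainer_config_helpers import *

    feature = data_layer(name='feature', size=100)
    label = data_layer(name='label', size=10)      # 10 classes assumed
    weight = data_layer(name='weight', size=1)     # optional per-sample weight
    prob = fc_layer(input=feature, size=10, act=SoftmaxActivation())
    cost = classification_cost(input=prob, label=label, weight=weight)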
@@ -3117,24 +3350,34 @@ def conv_operator(img, filter, filter_size, num_filters, if filter.size is not None: filter.size = filter_size * filter_size_y * num_filters * num_channels - op = ConvOperator(input_layer_names=[img.name, filter.name], - num_filters=num_filters, - conv_conf=Conv(filter_size=filter_size, - padding=padding, - stride=stride, - channels=num_channels, - filter_size_y=filter_size_y, - padding_y=padding_y, - stride_y=stride_y, - groups=1)) + op = ConvOperator( + input_layer_names=[img.name, filter.name], + num_filters=num_filters, + conv_conf=Conv( + filter_size=filter_size, + padding=padding, + stride=stride, + channels=num_channels, + filter_size_y=filter_size_y, + padding_y=padding_y, + stride_y=stride_y, + groups=1)) op.origin = [img, filter] return op + @wrap_param_attr_default() -def conv_projection(input, filter_size, num_filters, - num_channels=None, stride=1, padding=0, - filter_size_y=None, stride_y=None, padding_y=None, - groups=1, param_attr=None): +def conv_projection(input, + filter_size, + num_filters, + num_channels=None, + stride=1, + padding=0, + filter_size_y=None, + stride_y=None, + padding_y=None, + groups=1, + param_attr=None): """ ConvProjection with a layer as input. It performs element-wise multiplication with weight. @@ -3206,23 +3449,25 @@ def conv_projection(input, filter_size, num_filters, if param_attr.attr.get('initial_smart'): # special initial for conv layers. - init_w = (2.0 / (filter_size ** 2 * num_channels)) ** 0.5 + init_w = (2.0 / (filter_size**2 * num_channels))**0.5 param_attr.attr["initial_mean"] = 0.0 param_attr.attr["initial_std"] = init_w param_attr.attr["initial_strategy"] = 0 param_attr.attr["initial_smart"] = False - proj = ConvProjection(input_layer_name=input.name, - num_filters=num_filters, - conv_conf=Conv(filter_size=filter_size, - padding=padding, - stride=stride, - channels=num_channels, - filter_size_y=filter_size_y, - padding_y=padding_y, - stride_y=stride_y, - groups=groups), - **param_attr.attr) + proj = ConvProjection( + input_layer_name=input.name, + num_filters=num_filters, + conv_conf=Conv( + filter_size=filter_size, + padding=padding, + stride=stride, + channels=num_channels, + filter_size_y=filter_size_y, + padding_y=padding_y, + stride_y=stride_y, + groups=groups), + **param_attr.attr) proj.origin = input return proj @@ -3270,11 +3515,10 @@ def conv_shift_layer(a, b, name=None, layer_attr=None): name=name, type=LayerType.CONV_SHIFT_LAYER, inputs=[a.name, b.name], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.CONV_SHIFT_LAYER, parents=[a, b], - size=a.size) + return LayerOutput( + name, LayerType.CONV_SHIFT_LAYER, parents=[a, b], size=a.size) @wrap_name_default() @@ -3282,8 +3526,14 @@ def conv_shift_layer(a, b, name=None, layer_attr=None): @wrap_bias_attr_default() @wrap_act_default(act=LinearActivation()) @layer_support(ERROR_CLIPPING, DROPOUT) -def tensor_layer(a, b, size, act=None, name=None, - param_attr=None, bias_attr=None, layer_attr=None): +def tensor_layer(a, + b, + size, + act=None, + name=None, + param_attr=None, + bias_attr=None, + layer_attr=None): """ This layer performs tensor operation for two input. 
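(Editorial aside: the docstring above notes that :code:`conv_operator` takes two inputs and is used inside a :code:`mixed_layer`; a hedged sketch of that wiring follows. The image and filter layers, every size, and the resulting mixed-layer size are assumptions.)

.. code-block:: python

    from paddle.trainer_config_helpers import *

    img = data_layer(name='image', size=3 * 32 * 32)        # assumed 3x32x32 image
    filt = data_layer(name='filter', size=3 * 3 * 3 * 16)   # assumed 16 filters of 3x3x3
    op = conv_operator(img=img, filter=filt,
                       filter_size=3, num_filters=16, num_channels=3,
                       stride=1, padding=1)
    # with stride 1 and padding 1 the assumed 32x32 spatial size is preserved
    with mixed_layer(size=16 * 32 * 32) as m:
        m += op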
For example, each sample: @@ -3332,12 +3582,10 @@ def tensor_layer(a, b, size, act=None, name=None, type=LayerType.TENSOR_LAYER, active_type=act.name, bias=ParamAttr.to_bias(bias_attr), - inputs=[Input(a.name, **param_attr.attr), - Input(b.name)], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.TENSOR_LAYER, parents=[a, b], - activation=act, size=size) + inputs=[Input(a.name, **param_attr.attr), Input(b.name)], + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.TENSOR_LAYER, parents=[a, b], activation=act, size=size) @wrap_name_default() @@ -3345,11 +3593,17 @@ def tensor_layer(a, b, size, act=None, name=None, @wrap_bias_attr_default() @wrap_act_default() @layer_support() -def selective_fc_layer(input, select, size, act=None, name=None, +def selective_fc_layer(input, + select, + size, + act=None, + name=None, pass_generation=False, has_selected_colums=True, mul_ratio=0.02, - param_attr=None, bias_attr=None, layer_attr=None): + param_attr=None, + bias_attr=None, + layer_attr=None): """ Selectived fully connected layer. Different from fc_layer, the output of this layer maybe sparse. It requires an additional input to indicate @@ -3399,8 +3653,9 @@ def selective_fc_layer(input, select, size, act=None, name=None, if select.size is not None: assert select.size == size Layer( - inputs=[Input(ipt.name, **attr.attr) for ipt, attr in zip( - input, param_attr)] + [select.name], + inputs=[ + Input(ipt.name, **attr.attr) for ipt, attr in zip(input, param_attr) + ] + [select.name], name=name, type=LayerType.SEL_FC_LAYER, size=size, @@ -3409,11 +3664,13 @@ def selective_fc_layer(input, select, size, act=None, name=None, selective_fc_pass_generation=pass_generation, has_selected_colums=has_selected_colums, selective_fc_full_mul_ratio=mul_ratio, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SEL_FC_LAYER, list(input) + [select], - activation=act, - size=size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, + LayerType.SEL_FC_LAYER, + list(input) + [select], + activation=act, + size=size) @wrap_name_default() @@ -3442,15 +3699,17 @@ def sampling_id_layer(input, name=None, layer_attr=None): name=name, type=LayerType.SAMPLING_ID_LAYER, inputs=[Input(input.name)], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SAMPLING_ID_LAYER, input, - size=l.config.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.SAMPLING_ID_LAYER, input, size=l.config.size) @wrap_name_default() @layer_support() -def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0, +def slope_intercept_layer(input, + name=None, + slope=1.0, + intercept=0.0, layer_attr=None): """ This layer for applying a slope and an intercept to the input @@ -3484,16 +3743,14 @@ def slope_intercept_layer(input, name=None, slope=1.0, intercept=0.0, slope=slope, intercept=intercept, inputs=[Input(input.name)], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.SLOPE_INTERCEPT_LAYER, input, - size=input.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.SLOPE_INTERCEPT_LAYER, input, size=input.size) @wrap_name_default() @layer_support() -def linear_comb_layer(weights, vectors, size=None, name=None, - layer_attr=None): +def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None): """ A layer for weighted sum of vectors takes two inputs. 
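(Editorial aside: for the two small layers restyled above, a short sketch of :code:`tensor_layer` followed by :code:`slope_intercept_layer`. The input layers, the output size, and the slope/intercept values are illustrative assumptions.)

.. code-block:: python

    from paddle.trainer_config_helpers import *

    a_vec = data_layer(name='a', size=32)
    b_vec = data_layer(name='b', size=32)
    # bilinear combination of the two inputs into a vector of length `size`
    t = tensor_layer(a=a_vec, b=b_vec, size=16)
    # y = 0.5 * x + 1.0, applied element-wise
    shifted = slope_intercept_layer(input=t, slope=0.5, intercept=1.0)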
- Input: size of weights is M @@ -3543,7 +3800,7 @@ def linear_comb_layer(weights, vectors, size=None, name=None, if vectors.size is not None and weights.size is not None: assert vectors.size % weights.size == 0 if size is None: - size = vectors.size / weights.size + size = vectors.size / weights.size else: assert size == vectors.size / weights.size Layer( @@ -3551,10 +3808,9 @@ def linear_comb_layer(weights, vectors, size=None, name=None, type=LayerType.LINEAR_COMBINATION_LAYER, size=size, inputs=[Input(weights.name), Input(vectors.name)], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.LINEAR_COMBINATION_LAYER, - [weights, vectors], size=size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.LINEAR_COMBINATION_LAYER, [weights, vectors], size=size) convex_comb_layer = linear_comb_layer @@ -3626,21 +3882,23 @@ def block_expand_layer(input, if num_channels is None: assert input.num_filters is not None num_channels = input.num_filters - l = Layer(name=name, - inputs=Input(input.name, - block_expand=BlockExpand(channels=num_channels, - block_x=block_x, - block_y=block_y, - stride_x=stride_x, - stride_y=stride_y, - padding_x=padding_x, - padding_y=padding_y)), - type=LayerType.BLOCK_EXPAND, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - - return LayerOutput(name, LayerType.BLOCK_EXPAND, parents=[input], - size=l.config.size) + l = Layer( + name=name, + inputs=Input( + input.name, + block_expand=BlockExpand( + channels=num_channels, + block_x=block_x, + block_y=block_y, + stride_x=stride_x, + stride_y=stride_y, + padding_x=padding_x, + padding_y=padding_y)), + type=LayerType.BLOCK_EXPAND, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + + return LayerOutput( + name, LayerType.BLOCK_EXPAND, parents=[input], size=l.config.size) @wrap_name_default() @@ -3701,19 +3959,24 @@ def maxout_layer(input, assert input.num_filters is not None num_channels = input.num_filters assert num_channels % groups == 0 - l = Layer(name=name, - inputs=Input(input.name, - maxout=MaxOut(channels=num_channels, - groups=groups)), - type=LayerType.MAXOUT, - **ExtraLayerAttribute.to_kwargs(layer_attr)) - return LayerOutput(name, LayerType.MAXOUT, parents=[input], - size=l.config.size) + l = Layer( + name=name, + inputs=Input( + input.name, maxout=MaxOut( + channels=num_channels, groups=groups)), + type=LayerType.MAXOUT, + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.MAXOUT, parents=[input], size=l.config.size) @wrap_name_default() @layer_support() -def ctc_layer(input, label, size=None, name=None, norm_by_times=False, +def ctc_layer(input, + label, + size=None, + name=None, + norm_by_times=False, layer_attr=None): """ Connectionist Temporal Classification (CTC) is designed for temporal @@ -3769,15 +4032,19 @@ def ctc_layer(input, label, size=None, name=None, norm_by_times=False, size=size, norm_by_times=norm_by_times, inputs=[input.name, label.name], - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) return LayerOutput(name, LayerType.CTC_LAYER, [input, label], size=size) @wrap_name_default() @wrap_param_attr_default() @layer_support() -def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, +def crf_layer(input, + label, + size=None, + weight=None, + param_attr=None, + name=None, layer_attr=None): """ A layer for calculating the cost of sequential conditional random @@ -3819,8 +4086,7 @@ def crf_layer(input, label, size=None, 
weight=None, param_attr=None, name=None, else: assert size == input.size - ipts = [Input(input.name, **param_attr.attr), - Input(label.name)] + ipts = [Input(input.name, **param_attr.attr), Input(label.name)] if weight is not None: ipts.append(Input(weight.name)) @@ -3829,8 +4095,7 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, type=LayerType.CRF_LAYER, size=size, inputs=ipts, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) parents = [input, label] if weight is not None: parents.append(weight) @@ -3843,7 +4108,11 @@ def crf_layer(input, label, size=None, weight=None, param_attr=None, name=None, @wrap_name_default() @wrap_param_attr_default() @layer_support() -def crf_decoding_layer(input, size, label=None, param_attr=None, name=None, +def crf_decoding_layer(input, + size, + label=None, + param_attr=None, + name=None, layer_attr=None): """ A layer for calculating the decoding sequence of sequential conditional @@ -3880,8 +4149,7 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None, type=LayerType.CRF_DECODING_LAYER, size=size, inputs=ipts, - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) + **ExtraLayerAttribute.to_kwargs(layer_attr)) parents = [input] if label is not None: parents.append(label) @@ -3890,12 +4158,19 @@ def crf_decoding_layer(input, size, label=None, param_attr=None, name=None, # classes. return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1) + @wrap_bias_attr_default(has_bias=True) @wrap_name_default() @layer_support() -def nce_layer(input, label, num_classes, weight=None, - num_neg_samples=10, neg_distribution=None, - name=None, bias_attr=None, layer_attr=None): +def nce_layer(input, + label, + num_classes, + weight=None, + num_neg_samples=10, + neg_distribution=None, + name=None, + bias_attr=None, + layer_attr=None): """ Noise-contrastive estimation. Implements the method in the following paper: @@ -3964,10 +4239,10 @@ def nce_layer(input, label, num_classes, weight=None, num_neg_samples=num_neg_samples, inputs=ipts_for_layer, bias=ParamAttr.to_bias(bias_attr), - **ExtraLayerAttribute.to_kwargs(layer_attr) - ) - return LayerOutput(name, LayerType.NCE_LAYER, parents=parents, - size=l.config.size) + **ExtraLayerAttribute.to_kwargs(layer_attr)) + return LayerOutput( + name, LayerType.NCE_LAYER, parents=parents, size=l.config.size) + """ following are cost Layers. @@ -3976,7 +4251,13 @@ def nce_layer(input, label, num_classes, weight=None, @wrap_name_default() @layer_support() -def rank_cost(left, right, label, weight=None, name=None, coeff=1.0, layer_attr=None): +def rank_cost(left, + right, + label, + weight=None, + name=None, + coeff=1.0, + layer_attr=None): """ A cost Layer for learning to rank using gradient descent. Details can refer to `papers