
Add convolution Function #2282

Merged 25 commits on Jun 19, 2017

Commits (25)
3b65bc7
Add a naive convolution implement
hedaoyuan May 26, 2017
b6de52c
Bug fix
hedaoyuan May 26, 2017
1846d9e
Add a convolution Function based on gemm.
hedaoyuan May 26, 2017
1879332
Modify FunctionCompare to Compare2Function to support comparison of t…
hedaoyuan May 26, 2017
455888c
Add ConvOpTest for NaiveConv and GemmConv
hedaoyuan May 26, 2017
048b14a
Change stride to strides, and change padding to paddings.
hedaoyuan May 26, 2017
3ce974b
Add group argument in ConvFunctionBase
hedaoyuan May 31, 2017
3c0aa0c
Add GPU GemmConvFunction implementation
hedaoyuan Jun 2, 2017
c70d3e1
Some bug fix
hedaoyuan Jun 2, 2017
3408b4b
Bug fix
hedaoyuan Jun 2, 2017
afbe556
Modify the arguments description of ConvFunctionBase. And add the def…
hedaoyuan Jun 5, 2017
6a93f0f
Add the calculation implementation of GemmConvGradFilterFunction
hedaoyuan Jun 5, 2017
9032619
Bug fix & add test of GemmConvGradFilter.
hedaoyuan Jun 5, 2017
d99faf3
Add the calculation implementation of GemmConvGradInputFunction.
hedaoyuan Jun 6, 2017
9885c57
format
hedaoyuan Jun 6, 2017
7aac38c
Refactoring the code implementation of exconv and exconvt layer with …
hedaoyuan Jun 6, 2017
784e218
Fix the error of group convolution.
hedaoyuan Jun 7, 2017
95a7bc0
follow comments
hedaoyuan Jun 7, 2017
e039410
Remove the code of ExpandConvTransLayer.
hedaoyuan Jun 8, 2017
1e0cc74
Merge branch 'develop' of https://github.com/baidu/Paddle into convol…
hedaoyuan Jun 13, 2017
01d52eb
Fix RowConvOpTest use CpuGpuFuncCompare.
hedaoyuan Jun 13, 2017
2608c48
Add test cases where the height and width (input, filter) are not equal.
hedaoyuan Jun 13, 2017
c6e010d
Follow comments.
hedaoyuan Jun 16, 2017
1ed31b4
Bug fix.
hedaoyuan Jun 16, 2017
9c47c42
Change the groups in the comment to 1049089.
hedaoyuan Jun 19, 2017
4 changes: 3 additions & 1 deletion paddle/function/CMakeLists.txt
@@ -14,8 +14,8 @@ add_library(paddle_function STATIC ${cpp_files} ${cu_objs})
 add_dependencies(paddle_function ${external_project_dependencies})
 add_dependencies(paddle_function gen_proto_cpp)
 
-if(WITH_GPU)
 if(WITH_TESTING)
+  if(WITH_GPU)
   # TODO:
   # file(GLOB test_files . *OpTest.cpp)
   # add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files})
@@ -30,6 +30,8 @@ if(WITH_TESTING)
     add_simple_unittest(CosSimOpTest)
     add_simple_unittest(RowConvOpTest)
   endif()
+
+  add_simple_unittest(ConvOpTest)
 endif()
 
 add_style_check_target(paddle_function ${h_files})
4 changes: 2 additions & 2 deletions paddle/function/ContextProjectionOpTest.cpp
@@ -28,7 +28,7 @@ void testMatrixProjectionForward(int context_start,
       std::max(0, (int)(context_start + context_length - 1));
   if (pad == 0) is_padding = false;
 
-  FunctionCompare test(
+  CpuGpuFuncCompare test(
       "ContextProjectionForward",
       FuncConfig()
           .set("context_length", context_length)
@@ -60,7 +60,7 @@ void testMatrixProjectionBackward(int context_start,
       std::max(0, (int)(context_start + context_length - 1));
   if (pad == 0) is_padding = false;
 
-  FunctionCompare test(
+  CpuGpuFuncCompare test(
       "ContextProjectionBackward",
       FuncConfig()
           .set("context_length", context_length)
146 changes: 146 additions & 0 deletions paddle/function/ConvOp.h
@@ -0,0 +1,146 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Function.h"

namespace paddle {

/*
* \brief Based on the ConvFunctionBase class, the forward calculation,
* backward input calculation and backward filter calculation
* of convolution operations can be implemented.
*
* Arguments of forward and backward calculation:
* 1. Forward calculation of convolution.
* inputs = {INPUT, FILTER}, outputs = {OUTPUT}
* The first and second input arguments are the input image and the
* filter data. The output argument is the output image.
*
* 2. Backward input calculation of convolution.
* inputs = {OUTPUT_GRAD, FILTER}, outputs = {INPUT_GRAD}
* The first and second input arguments are the output gradient image
* and the filter data. The output argument is the input gradient image.
*
* 3. Backward filter calculation of convolution.
* inputs = {OUTPUT_GRAD, INPUT}, outputs = {FILTER_GRAD}
* The first and second input arguments are the output gradient image
* and the input image. The output argument is the filter gradient.
*
* Argument formats of input, filter and output:
* 1. The input image, output image, input image gradient, and output image
* gradient are all in NCHW format, where N is the batch size, C is the
* number of channels, and H and W are the height and width of the image
* or image gradient.
*
* 2. The format of the filter data is MCHW, where M is the number of output
* image channels, C is the number of input image channels, and H and W
* are the height and width of the filter.
*
* If `groups` is greater than 1, the filter data format is GMCHW, where
* G is `groups`, G * M is the number of output image channels, G * C is
* the number of input image channels, and H and W are the height and
* width of the filter.
*/
class ConvFunctionBase : public FunctionBase {
public:
void init(const FuncConfig& config) override {
// function arguments
strides_ = config.get<std::vector<size_t>>("strides");
paddings_ = config.get<std::vector<size_t>>("paddings");
groups_ = config.get<size_t>("groups");

// number of inputs and outputs
numInputs_ = 2;
numOutputs_ = 1;
}

virtual void calc(const BufferArgs& inputs, const BufferArgs& outputs) {}

// input can be INPUT and INPUT_GRAD
// filter can be FILTER and FILTER_GRAD
// output can be OUTPUT and OUTPUT_GRAD
void check(const TensorShape& input,
const TensorShape& filter,
const TensorShape& output) {
// The input and output arguments should be 4-dimensional.
CHECK_EQ(input.ndims(), (size_t)4);
CHECK_EQ(output.ndims(), (size_t)4);
// The batchSize of the input needs to be equal to
// the batchSize of the output.
CHECK_EQ(input[0], output[0]);

if (filter.ndims() == (size_t)4) {
// If the filter is 4-dimensional, grouped convolution is not supported.
CHECK_EQ(groups_, (size_t)1);
// The input and output channel dimensions are the second and first
// dimensions of the filter shape.
CHECK_EQ(input[1], filter[1]);
CHECK_EQ(output[1], filter[0]);
} else {
// filter argument should be 5-dimensional.
CHECK_EQ(filter.ndims(), (size_t)5);
// The first dimension of the filter is the number of groups.
CHECK_EQ(filter[0], groups_);
// The input and output channel dimensions are the third and second
// dimensions of the filter shape.
CHECK_EQ(input[1], filter[2] * groups_);
CHECK_EQ(output[1], filter[1] * groups_);
}
}

protected:
size_t getFilterHeight(const TensorShape& filter) const {
return filter[filter.ndims() - 2];
}

size_t getFilterWidth(const TensorShape& filter) const {
return filter[filter.ndims() - 1];
}

std::vector<size_t> strides_;
std::vector<size_t> paddings_;

/// Group size; refer to grouped convolution in
/// Alex Krizhevsky's paper: when group=2, the first half of the
/// filters are connected only to the first half of the input channels,
/// and the second half are connected only to the second half.
size_t groups_;

inline int strideH() const { return strides_[0]; }

inline int strideW() const { return strides_[1]; }

inline int paddingH() const { return paddings_[0]; }

inline int paddingW() const { return paddings_[1]; }

// Temporary memory buffer used in the convolution calculation.
MemoryHandlePtr memory_;

template <DeviceType Device>
void resizeBuffer(size_t newSize) {
if (!memory_ || newSize * sizeof(real) > memory_->getAllocSize()) {
if (Device == DEVICE_TYPE_CPU) {
memory_ = std::make_shared<CpuMemoryHandle>(newSize * sizeof(real));
} else {
memory_ = std::make_shared<GpuMemoryHandle>(newSize * sizeof(real));
}
}
}
};

} // namespace paddle
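
As a side note, the shape relation that check() and the tests below rely on is the standard convolution arithmetic. A minimal standalone sketch of it (the helper name outputSize is hypothetical and not part of this PR):

#include <cstddef>

// Output spatial extent along one dimension. In integer arithmetic this is
// equivalent to the usual (inputSize + 2 * padding - filterSize) / stride + 1.
inline size_t outputSize(size_t inputSize,
                         size_t filterSize,
                         size_t stride,
                         size_t padding) {
  return (inputSize - filterSize + 2 * padding + stride) / stride;
}

For example, inputSize = 7, filterSize = 3, stride = 1, padding = 0 gives outputSize = 5, matching the shapes ConvOpTest.cpp constructs below.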
210 changes: 210 additions & 0 deletions paddle/function/ConvOpTest.cpp
@@ -0,0 +1,210 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <memory>
#include "Function.h"
#include "FunctionTest.h"

namespace paddle {

enum TestType {
kForwardTest = 0,
kBackwardInputTest = 1,
kBackwardFilterTest = 2,
};

template <DeviceType DType1, DeviceType DType2>
class ConvolutionTest {
public:
ConvolutionTest(const std::string& conv1,
const std::string& conv2,
TestType type,
std::string algo = "auto") {
for (size_t batchSize : {1, 32}) {
for (size_t inputSize : {7, 14, 54}) {
for (size_t filterSize : {1, 3, 5}) {
for (size_t inputChannels : {3, 64}) {
for (size_t outputChannels : {3, 64, 128}) {
Contributor:
Should unit tests be added for rectangular (non-square) input, output, and filter shapes?

Contributor Author:
Adding them here would make the test run for a very long time; I'll add separate tests for the rectangular case later.

Contributor Author:
Done.

if (inputChannels < outputChannels) break;
for (size_t stride : {1, 2}) {
for (size_t padding : {0, 1}) {
if (padding >= filterSize) break;
size_t outputSize =
(inputSize - filterSize + 2 * padding + stride) / stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputSize
<< " inputWidth=" << inputSize
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterSize
<< " filterWidth=" << filterSize
<< " outputHeight=" << outputSize
<< " outputWidth=" << outputSize
<< " stride=" << stride << " padding=" << padding;

std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", (size_t)1)
.set("algo", algo));

TensorShape input{
batchSize, inputChannels, inputSize, inputSize};
TensorShape filter{
outputChannels, inputChannels, filterSize, filterSize};
TensorShape output{
batchSize, outputChannels, outputSize, outputSize};

if (type == kForwardTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.run();
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter));
Contributor:
Shouldn't this be ADD_TO?

Contributor Author:
BackwardFilter supports ASSIGN_TO mode. The tests default to ASSIGN_TO; for a Function that does not support ASSIGN_TO, ADD_TO must be configured explicitly.
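
(Editor's illustration, not the PR's BufferArg API: ASSIGN_TO overwrites the output buffer, while ADD_TO accumulates into it.)

// Sketch of the two modes for a single output element:
void assignTo(float& out, float result) { out = result; }   // ASSIGN_TO
void addTo(float& out, float result) { out += result; }     // ADD_TO
// A Function that can only accumulate must be tested with ADD_TO explicitly,
// as is done for the backward-input tests above.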

test.run();
}
}
}
}
}
}
}
}
}
};

// Mainly used to test cases where the height and width (input, filter)
// are not equal.
template <DeviceType DType1, DeviceType DType2>
class ConvolutionTest2 {
public:
ConvolutionTest2(const std::string& conv1,
const std::string& conv2,
TestType type,
std::string algo = "auto") {
for (size_t batchSize : {16}) {
for (size_t inputHeight : {7, 31}) {
for (size_t inputWidth : {10, 54}) {
for (size_t filterHeight : {1, 5}) {
for (size_t filterWidth : {3, 7}) {
for (size_t inputChannels : {7}) {
for (size_t outputChannels : {32}) {
size_t stride = 1;
size_t padding = 0;
size_t outputHeight =
(inputHeight - filterHeight + 2 * padding + stride) /
stride;
size_t outputWidth =
(inputWidth - filterWidth + 2 * padding + stride) /
stride;
VLOG(3) << " batchSize=" << batchSize
<< " inputChannels=" << inputChannels
<< " inputHeight=" << inputHeight
<< " inputWidth=" << inputWidth
<< " outputChannels=" << outputChannels
<< " filterHeight=" << filterHeight
<< " filterWidth=" << filterWidth
<< " outputHeight=" << outputHeight
<< " outputWidth=" << outputWidth
<< " stride=" << stride << " padding=" << padding;

std::vector<size_t> paddings = {padding, padding};
std::vector<size_t> strides = {stride, stride};
Compare2Function<DType1, DType2> test(
conv1,
conv2,
FuncConfig()
.set("paddings", paddings)
.set("strides", strides)
.set("groups", (size_t)1)
.set("algo", algo));

TensorShape input{
batchSize, inputChannels, inputHeight, inputWidth};
TensorShape filter{
outputChannels, inputChannels, filterHeight, filterWidth};
TensorShape output{
batchSize, outputChannels, outputHeight, outputWidth};

if (type == kForwardTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.run();
} else if (type == kBackwardInputTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, input), ADD_TO);
test.run();
} else if (type == kBackwardFilterTest) {
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, output));
test.addInputs(BufferArg(VALUE_TYPE_FLOAT, input));
test.addOutputs(BufferArg(VALUE_TYPE_FLOAT, filter));
test.run();
}
}
}
}
}
}
}
}
}
};

TEST(Forward, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test(
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_CPU> test2(
"NaiveConv-CPU", "GemmConv-CPU", kForwardTest);
}

#ifndef PADDLE_ONLY_CPU
TEST(Forward, GEMM2) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConv-CPU", "GemmConv-GPU", kForwardTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConv-CPU", "GemmConv-GPU", kForwardTest);
}

TEST(BackwardInput, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradInput-CPU", "GemmConvGradInput-GPU", kBackwardInputTest);
}

TEST(BackwardFilter, GEMM) {
ConvolutionTest<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test(
"GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
ConvolutionTest2<DEVICE_TYPE_CPU, DEVICE_TYPE_GPU> test2(
"GemmConvGradFilter-CPU", "GemmConvGradFilter-GPU", kBackwardFilterTest);
}
#endif

} // namespace paddle
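
For reference, here is a self-contained sketch of the naive NCHW forward convolution that a reference implementation such as "NaiveConv-CPU" is expected to compute for groups = 1. This is an illustration written for this review, not the PR's actual NaiveConv code; all names are local to the sketch.

#include <cstddef>
#include <vector>

void naiveConvForward(const std::vector<float>& input,   // N x C x IH x IW
                      const std::vector<float>& filter,  // M x C x FH x FW
                      std::vector<float>& output,        // N x M x OH x OW
                      size_t N, size_t C, size_t IH, size_t IW,
                      size_t M, size_t FH, size_t FW,
                      size_t strideH, size_t strideW,
                      size_t paddingH, size_t paddingW) {
  // Output spatial sizes, using the same formula as the tests above.
  size_t OH = (IH - FH + 2 * paddingH + strideH) / strideH;
  size_t OW = (IW - FW + 2 * paddingW + strideW) / strideW;
  for (size_t n = 0; n < N; ++n) {
    for (size_t m = 0; m < M; ++m) {
      for (size_t oh = 0; oh < OH; ++oh) {
        for (size_t ow = 0; ow < OW; ++ow) {
          float sum = 0.0f;
          for (size_t c = 0; c < C; ++c) {
            for (size_t fh = 0; fh < FH; ++fh) {
              for (size_t fw = 0; fw < FW; ++fw) {
                // Map the output coordinate back to the (padded) input.
                int ih = (int)(oh * strideH + fh) - (int)paddingH;
                int iw = (int)(ow * strideW + fw) - (int)paddingW;
                if (ih < 0 || ih >= (int)IH || iw < 0 || iw >= (int)IW)
                  continue;  // zero padding contributes nothing
                sum += input[((n * C + c) * IH + ih) * IW + iw] *
                       filter[((m * C + c) * FH + fh) * FW + fw];
              }
            }
          }
          output[((n * M + m) * OH + oh) * OW + ow] = sum;
        }
      }
    }
  }
}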