Merge remote-tracking branch 'upstream/master' into caffe2_core_hip
* upstream/master:
  Makes AccumulateGrad high priority in backwards passes (pytorch#7604)
  [C++ API] Implement builder style construction (pytorch#7597)
  C10D: Added TCPStore to support C10D store interface (pytorch#7560)
  [auto] Update onnx to ba86ec2 - Protobuf typing (onnx/onnx#982) onnx/onnx@ba86ec2
  Add LBFGS optimization algorithm to C++ API (pytorch#7596)
petrex committed May 17, 2018
2 parents f0b0184 + f229549 commit 199831d
Showing 40 changed files with 2,235 additions and 813 deletions.
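Most of the churn below comes from the builder-style construction change (pytorch#7597): modules are no longer wrapped in a free make(...) helper; instead, options are set through chainable setters and finalized with .build(), which returns the std::shared_ptr that containers expect. A minimal self-contained sketch of the pattern (the LinearModule type and the with_bias option are hypothetical stand-ins, not the actual torch::nn internals):

#include <memory>

// Illustrative stand-in for a built module; the real torch::nn::Linear
// holds parameters and implements forward().
struct LinearModule {
  LinearModule(int in, int out, bool with_bias)
      : in_features(in), out_features(out), bias(with_bias) {}
  int in_features;
  int out_features;
  bool bias;
};

// Builder: required arguments go through the constructor; optional ones
// through chainable setters that return *this.
class Linear {
 public:
  Linear(int in, int out) : in_(in), out_(out) {}
  Linear& with_bias(bool value) {  // hypothetical option, for illustration
    bias_ = value;
    return *this;
  }
  // build() materializes the configured module as the shared_ptr that
  // the container add()/append() calls in the tests take.
  std::shared_ptr<LinearModule> build() const {
    return std::make_shared<LinearModule>(in_, out_, bias_);
  }

 private:
  int in_;
  int out_;
  bool bias_ = true;
};

int main() {
  // Before this merge: auto model = make(Linear(5, 2));
  auto model = Linear(5, 2).with_bias(false).build();
  return model->bias ? 1 : 0;
}

The same shape covers Conv1d/2d/3d (.stride(...)), BatchNorm (.stateful(true)), Dropout, Embedding, and Functional in the diffs below.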
90 changes: 52 additions & 38 deletions test/cpp/api/container.cpp
@@ -5,24 +5,24 @@
using namespace torch;
using namespace torch::nn;

-class TestModel : public CloneableModule<TestModel> {
+class TestModel : public Module {
public:
TestModel() {
-add(make(Linear(10, 3)), "l1");
-add(make(Linear(3, 5)), "l2");
-add(make(Linear(5, 100)), "l3");
+add(Linear(10, 3).build(), "l1");
+add(Linear(3, 5).build(), "l2");
+add(Linear(5, 100).build(), "l3");
}

variable_list forward(variable_list input) override {
return input;
};
};

-class NestedModel : public CloneableModule<NestedModel> {
+class NestedModel : public Module {
public:
NestedModel() {
-add(make(Linear(5, 20)), "l1");
-add(make(TestModel()), "test");
+add(Linear(5, 20).build(), "l1");
+add(std::make_shared<TestModel>(), "test");
add(Var(at::CPU(at::kFloat).tensor({3, 2, 21}), false), "param");
}

@@ -34,24 +34,23 @@ class NestedModel : public CloneableModule<NestedModel> {
TEST_CASE("containers") {
SECTION("conv") {
SECTION("1d") {
-auto model = make(Conv1d(3, 2, 3).stride(2));
+auto model = Conv1d(3, 2, 3).stride(2).build();
auto x = Var(at::CPU(at::kFloat).randn({2, 3, 5}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();

backward(s);
-REQUIRE(y.ndimension() == 4);
+REQUIRE(y.ndimension() == 3);
REQUIRE(s.ndimension() == 0);
for (auto i = 0; i < 3; i++) {
REQUIRE(y.size(i) == 2);
}

REQUIRE(model->parameters()["weight"].grad().numel() == 3 * 2 * 3);
REQUIRE(model->parameters().at("weight").grad().numel() == 3 * 2 * 3);
}

SECTION("2d") {
SECTION("even") {
-auto model = make(Conv2d(3, 2, 3).stride(2));
+auto model = Conv2d(3, 2, 3).stride(2).build();
auto x = Var(at::CPU(at::kFloat).randn({2, 3, 5, 5}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -63,11 +62,12 @@ TEST_CASE("containers") {
REQUIRE(y.size(i) == 2);
}

REQUIRE(model->parameters()["weight"].grad().numel() == 3 * 2 * 3 * 3);
REQUIRE(
model->parameters().at("weight").grad().numel() == 3 * 2 * 3 * 3);
}

SECTION("uneven") {
-auto model = make(Conv2d(3, 2, IntVec({3, 2})).stride(2));
+auto model = Conv2d(3, 2, {3, 2}).stride({2, 2}).build();
auto x = Var(at::CPU(at::kFloat).randn({2, 3, 5, 4}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -79,12 +79,12 @@ TEST_CASE("containers") {
REQUIRE(y.size(i) == 2);
}

REQUIRE(model->parameters()["weight"].grad().numel() == 3 * 2 * 3 * 2);
REQUIRE(
model->parameters().at("weight").grad().numel() == 3 * 2 * 3 * 2);
}
}

SECTION("3d") {
-auto model = make(Conv3d(3, 2, 3).stride(2));
+auto model = Conv3d(3, 2, 3).stride(2).build();
auto x = Var(at::CPU(at::kFloat).randn({2, 3, 5, 5, 5}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -97,13 +97,13 @@ TEST_CASE("containers") {
}

REQUIRE(
model->parameters()["weight"].grad().numel() == 3 * 2 * 3 * 3 * 3);
model->parameters().at("weight").grad().numel() ==
3 * 2 * 3 * 3 * 3);
}
}

SECTION("linear") {
SECTION("basic1") {
-auto model = make(Linear(5, 2));
+auto model = Linear(5, 2).build();
auto x = Var(at::CPU(at::kFloat).randn({10, 5}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -114,14 +114,14 @@ TEST_CASE("containers") {
REQUIRE(y.size(0) == 10);
REQUIRE(y.size(1) == 2);

REQUIRE(model->parameters()["weight"].grad().numel() == 2 * 5);
REQUIRE(model->parameters().at("weight").grad().numel() == 2 * 5);
}

SECTION("sequential") {
-auto model = make(ContainerList()
-.append(make(Linear(10, 3)))
-.append(make(Linear(3, 5)))
-.append(make(Linear(5, 100))));
+auto model = std::make_shared<ContainerList>();
+model->append(Linear(10, 3).build());
+model->append(Linear(3, 5).build());
+model->append(Linear(5, 100).build());

auto x = Var(at::CPU(at::kFloat).randn({1000, 10}));
for (auto layer : *model) {
@@ -137,10 +137,10 @@ TEST_CASE("containers") {
}

SECTION("simple") {
-auto model = make(SimpleContainer());
-auto l1 = model->add(make(Linear(10, 3)), "l1");
-auto l2 = model->add(make(Linear(3, 5)), "l2");
-auto l3 = model->add(make(Linear(5, 100)), "l3");
+auto model = std::make_shared<SimpleContainer>();
+auto l1 = model->add(Linear(10, 3).build(), "l1");
+auto l2 = model->add(Linear(3, 5).build(), "l2");
+auto l3 = model->add(Linear(5, 100).build(), "l3");

auto x = Var(at::CPU(at::kFloat).randn({1000, 10}));
x = l1->forward({x})[0].clamp_min(0);
@@ -158,7 +158,7 @@ TEST_CASE("containers") {
SECTION("embedding") {
SECTION("basic") {
int dict_size = 10;
-auto model = make(Embedding(dict_size, 2));
+auto model = Embedding(dict_size, 2).build();
// Cannot get gradients to change indices (input) - only for embedding
// params
auto x = Var(at::CPU(at::kLong).tensor({10}).fill_(dict_size - 1), false);
@@ -171,11 +171,11 @@ TEST_CASE("containers") {
REQUIRE(y.size(0) == 10);
REQUIRE(y.size(1) == 2);

REQUIRE(model->parameters()["weight"].grad().numel() == 2 * dict_size);
REQUIRE(model->parameters().at("table").grad().numel() == 2 * dict_size);
}

SECTION("list") {
-auto model = make(Embedding(6, 4));
+auto model = Embedding(6, 4).build();
auto x = Var(at::CPU(at::kLong).tensor({2, 3}).fill_(5), false);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -189,7 +189,7 @@ TEST_CASE("containers") {
}

SECTION("dropout") {
-auto dropout = make(Dropout(0.5));
+auto dropout = Dropout(0.5).build();
Variable x = Var(at::CPU(at::kFloat).ones(100));
Variable y = dropout->forward({x})[0];

@@ -207,7 +207,7 @@ TEST_CASE("containers") {
}

SECTION("param") {
-auto model = make(NestedModel());
+auto model = std::make_shared<NestedModel>();
REQUIRE(model->param("param").size(0) == 3);
REQUIRE(model->param("param").size(1) == 2);
REQUIRE(model->param("param").size(2) == 21);
@@ -224,11 +224,25 @@ TEST_CASE("containers") {
REQUIRE(model->param("test.l3.weight").size(0) == 100);
REQUIRE(model->param("test.l3.weight").size(1) == 5);
}

SECTION("functional") {
bool was_called = false;
// clang-format off
auto functional = Functional([&was_called](variable_list input) {
was_called = true;
return input;
}).build();
// clang-format on
auto output = functional->forward({Var(at::CPU(at::kFloat).ones(5))});
REQUIRE(was_called);
REQUIRE(output.size() == 1);
REQUIRE(output.front().equal(Var(at::CPU(at::kFloat).ones(5))));
}
}

TEST_CASE("containers_cuda", "[cuda]") {
SECTION("1") {
-auto model = make(Linear(5, 2));
+auto model = Linear(5, 2).build();
model->cuda();
auto x = Var(at::CUDA(at::kFloat).randn({10, 5}), true);
auto y = model->forward({x})[0];
@@ -240,11 +254,11 @@ TEST_CASE("containers_cuda", "[cuda]") {
REQUIRE(y.size(0) == 10);
REQUIRE(y.size(1) == 2);

REQUIRE(model->parameters()["weight"].grad().numel() == 2 * 5);
REQUIRE(model->parameters().at("weight").grad().numel() == 2 * 5);
}

SECTION("2") {
-auto model = make(Linear(5, 2));
+auto model = Linear(5, 2).build();
model->cuda();
model->cpu();
auto x = Var(at::CPU(at::kFloat).randn({10, 5}), true);
@@ -257,6 +271,6 @@ TEST_CASE("containers_cuda", "[cuda]") {
REQUIRE(y.size(0) == 10);
REQUIRE(y.size(1) == 2);

REQUIRE(model->parameters()["weight"].grad().numel() == 2 * 5);
REQUIRE(model->parameters().at("weight").grad().numel() == 2 * 5);
}
}
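
The new "functional" section above exercises a Functional builder that wraps an arbitrary callable as a module. A rough sketch of the idea, assuming Functional simply stores the callable and invokes it in forward() (names and details are illustrative, not the actual torch internals):

#include <functional>
#include <utility>
#include <vector>

// Illustrative sketch: a module-like wrapper that routes forward() through
// a stored callable, which is what the Functional test above exercises.
template <typename Variable>
class FunctionalSketch {
 public:
  using variable_list = std::vector<Variable>;

  explicit FunctionalSketch(std::function<variable_list(variable_list)> fn)
      : fn_(std::move(fn)) {}

  // forward() just delegates to the captured callable.
  variable_list forward(variable_list input) {
    return fn_(std::move(input));
  }

 private:
  std::function<variable_list(variable_list)> fn_;
};

int main() {
  bool was_called = false;
  FunctionalSketch<int> identity([&](std::vector<int> in) {
    was_called = true;
    return in;
  });
  auto out = identity.forward({1, 2, 3});
  return (was_called && out.size() == 3) ? 0 : 1;
}

Because std::function can hold stateful closures, the test's lambda can flip its captured was_called flag when forward() runs.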
40 changes: 20 additions & 20 deletions test/cpp/api/integration.cpp
@@ -221,10 +221,10 @@ TEST_CASE("integration") {
std::cerr
<< "Training episodic policy gradient with a critic for up to 3000"
" episodes, rest your eyes for a bit!\n";
-auto model = make(SimpleContainer());
-auto linear = model->add(make(Linear(4, 128)), "linear");
-auto policyHead = model->add(make(Linear(128, 2)), "policy");
-auto valueHead = model->add(make(Linear(128, 1)), "action");
+auto model = std::make_shared<SimpleContainer>();
+auto linear = model->add(Linear(4, 128).build(), "linear");
+auto policyHead = model->add(Linear(128, 2).build(), "policy");
+auto valueHead = model->add(Linear(128, 1).build(), "action");
auto optim = Adam(model, 1e-3).make();

std::vector<Variable> saved_log_probs;
@@ -320,13 +320,13 @@ TEST_CASE("integration") {
}

TEST_CASE("integration/mnist", "[cuda]") {
-auto model = make(SimpleContainer());
-auto conv1 = model->add(make(Conv2d(1, 10, 5)), "conv1");
-auto conv2 = model->add(make(Conv2d(10, 20, 5)), "conv2");
-auto drop = make(Dropout(0.3));
-auto drop2d = make(Dropout2d(0.3));
-auto linear1 = model->add(make(Linear(320, 50)), "linear1");
-auto linear2 = model->add(make(Linear(50, 10)), "linear2");
+auto model = std::make_shared<SimpleContainer>();
+auto conv1 = model->add(Conv2d(1, 10, 5).build(), "conv1");
+auto conv2 = model->add(Conv2d(10, 20, 5).build(), "conv2");
+auto drop = Dropout(0.3).build();
+auto drop2d = Dropout2d(0.3).build();
+auto linear1 = model->add(Linear(320, 50).build(), "linear1");
+auto linear2 = model->add(Linear(50, 10).build(), "linear2");

auto forward = [&](Variable x) {
x = std::get<0>(at::max_pool2d(conv1->forward({x})[0], {2, 2}))
@@ -355,15 +355,15 @@ TEST_CASE("integration/mnist", "[cuda]") {
}

TEST_CASE("integration/mnist/batchnorm", "[cuda]") {
-auto model = make(SimpleContainer());
-auto conv1 = model->add(make(Conv2d(1, 10, 5)), "conv1");
-auto batchnorm2d = model->add(
-make(BatchNorm(10, /*affine=*/true, /*stateful=*/true)), "batchnorm2d");
-auto conv2 = model->add(make(Conv2d(10, 20, 5)), "conv2");
-auto linear1 = model->add(make(Linear(320, 50)), "linear1");
-auto batchnorm1 = model->add(
-make(BatchNorm(50, /*affine=*/true, /*stateful=*/true)), "batchnorm1");
-auto linear2 = model->add(make(Linear(50, 10)), "linear2");
+auto model = std::make_shared<SimpleContainer>();
+auto conv1 = model->add(Conv2d(1, 10, 5).build(), "conv1");
+auto batchnorm2d =
+model->add(BatchNorm(10).stateful(true).build(), "batchnorm2d");
+auto conv2 = model->add(Conv2d(10, 20, 5).build(), "conv2");
+auto linear1 = model->add(Linear(320, 50).build(), "linear1");
+auto batchnorm1 =
+model->add(BatchNorm(50).stateful(true).build(), "batchnorm1");
+auto linear2 = model->add(Linear(50, 10).build(), "linear2");

auto forward = [&](Variable x) {
x = std::get<0>(at::max_pool2d(conv1->forward({x})[0], {2, 2}))
53 changes: 52 additions & 1 deletion test/cpp/api/misc.cpp
@@ -1,14 +1,17 @@
#include <catch.hpp>

+#include <torch/expanding_array.h>
#include <torch/torch.h>

using namespace torch;
using namespace torch::nn;

+using Catch::StartsWith;

TEST_CASE("misc") {
SECTION("no_grad") {
no_grad_guard guard;
-auto model = make(Linear(5, 2));
+auto model = Linear(5, 2).build();
auto x = Var(at::CPU(at::kFloat).randn({10, 5}), true);
auto y = model->forward({x})[0];
Variable s = y.sum();
@@ -41,3 +44,51 @@ TEST_CASE("misc_cuda", "[cuda]") {
REQUIRE(l_inf < 1e-10);
}
}

TEST_CASE("expanding-array") {
SECTION("successful construction") {
SECTION("initializer_list") {
ExpandingArray<5> e({1, 2, 3, 4, 5});
REQUIRE(e.size() == 5);
for (size_t i = 0; i < e.size(); ++i) {
REQUIRE((*e)[i] == i + 1);
}
}

SECTION("vector") {
ExpandingArray<5> e(std::vector<int64_t>{1, 2, 3, 4, 5});
REQUIRE(e.size() == 5);
for (size_t i = 0; i < e.size(); ++i) {
REQUIRE((*e)[i] == i + 1);
}
}

SECTION("array") {
ExpandingArray<5> e(std::array<int64_t, 5>({1, 2, 3, 4, 5}));
REQUIRE(e.size() == 5);
for (size_t i = 0; i < e.size(); ++i) {
REQUIRE((*e)[i] == i + 1);
}
}

SECTION("single value") {
ExpandingArray<5> e(5);
REQUIRE(e.size() == 5);
for (size_t i = 0; i < e.size(); ++i) {
REQUIRE((*e)[i] == 5);
}
}
}
SECTION("throws for incorrect size on construction") {
SECTION("initializer_list") {
REQUIRE_THROWS_WITH(
ExpandingArray<5>({1, 2, 3, 4, 5, 6, 7}),
StartsWith("Expected 5 values, but instead got 7"));
}
SECTION("vector") {
REQUIRE_THROWS_WITH(
ExpandingArray<5>(std::vector<int64_t>({1, 2, 3, 4, 5, 6, 7})),
StartsWith("Expected 5 values, but instead got 7"));
}
}
}
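
The expanding-array tests above pin down the contract: a single value broadcasts to all N slots, while a list, vector, or array must supply exactly N values or construction throws. A minimal sketch matching that contract (illustrative; the real implementation lives in torch/expanding_array.h and may differ in detail):

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <stdexcept>
#include <string>

// Sketch of the behavior the tests above verify; illustrative only.
template <std::size_t N>
class ExpandingArraySketch {
 public:
  // A single value expands to fill all N slots.
  /*implicit*/ ExpandingArraySketch(std::int64_t single) {
    values_.fill(single);
  }

  // A list must supply exactly N values, mirroring the tested error message.
  /*implicit*/ ExpandingArraySketch(std::initializer_list<std::int64_t> list) {
    if (list.size() != N) {
      throw std::runtime_error(
          "Expected " + std::to_string(N) + " values, but instead got " +
          std::to_string(list.size()));
    }
    std::copy(list.begin(), list.end(), values_.begin());
  }

  // operator* exposes the underlying array, as in the tests' (*e)[i].
  std::array<std::int64_t, N>& operator*() { return values_; }
  std::size_t size() const { return N; }

 private:
  std::array<std::int64_t, N> values_;
};

int main() {
  ExpandingArraySketch<5> broadcast(7);  // expands to {7, 7, 7, 7, 7}
  ExpandingArraySketch<5> exact({1, 2, 3, 4, 5});
  return ((*broadcast)[4] == 7 && (*exact)[0] == 1) ? 0 : 1;
}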
(Diffs for the remaining 37 changed files are not shown.)