Add at::ceil and at::floor #298

Merged 1 commit on Feb 19, 2019.
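
This PR wires at::ceil and at::floor through the full PyTorch/XLA stack: ATen overrides in aten_xla_type.cpp/.h, lazy-tensor methods in tensor.cpp/.h, IR node constructors in ops.cpp/.h via the PTXLA_UNARY_OP macro, and C++ tests comparing each XLA device's result against the CPU baseline.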
test/cpp/test_aten_xla_tensor.cpp (+20, -0)

@@ -644,6 +644,26 @@ TEST_F(AtenXlaTensorTest, TestClampMax) {
});
}

TEST_F(AtenXlaTensorTest, TestCeil) {
at::Tensor a = at::randn({2, 2}, at::TensorOptions(at::kFloat)) * 100.0;
at::Tensor b = at::ceil(a);
ForEachDevice([&](const Device& device) {
at::Tensor xla_a = bridge::CreateXlaTensor(a, device);
at::Tensor xla_b = at::ceil(xla_a);
AllClose(b, xla_b);
});
}

TEST_F(AtenXlaTensorTest, TestFloor) {
at::Tensor a = at::randn({2, 2}, at::TensorOptions(at::kFloat)) * 100.0;
at::Tensor b = at::floor(a);
ForEachDevice([&](const Device& device) {
at::Tensor xla_a = bridge::CreateXlaTensor(a, device);
at::Tensor xla_b = at::floor(xla_a);
AllClose(b, xla_b);
});
}

TEST_F(AtenXlaTensorTest, TestNeg) {
at::Tensor a = at::rand({2, 2}, at::TensorOptions(at::kFloat));
at::Tensor b = at::neg(a);
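For reference, the semantics exercised by the CPU baseline above are the standard round-toward-positive/negative-infinity behavior (general math, not specific to this PR):

// ceil rounds toward +infinity, floor toward -infinity:
//   ceil(1.5)  ==  2.0    floor(1.5)  ==  1.0
//   ceil(-1.5) == -1.0    floor(-1.5) == -2.0
// Scaling randn by 100.0 produces values with nontrivial integer parts,
// so the tests exercise more than the (-1, 1) range randn alone covers.
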
torch_xla/csrc/aten_xla_type.cpp (+9, -0)

@@ -434,6 +434,15 @@ at::Tensor& AtenXlaType::clamp_(at::Tensor& self, c10::optional<at::Scalar> min,
return self;
}

at::Tensor AtenXlaType::ceil(const at::Tensor& self) const {
return bridge::AtenFromXlaTensor(XLATensor::ceil(bridge::GetXlaTensor(self)));
}

at::Tensor AtenXlaType::floor(const at::Tensor& self) const {
return bridge::AtenFromXlaTensor(
XLATensor::floor(bridge::GetXlaTensor(self)));
}

int64_t AtenXlaType::size(const at::Tensor& self, int64_t dim) const {
return bridge::GetXlaTensor(self).size(dim);
}
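Both overrides follow the unwrap/build/rewrap pattern used throughout aten_xla_type.cpp: bridge::GetXlaTensor extracts the lazy XLATensor behind the ATen handle, XLATensor::ceil / XLATensor::floor record a new IR node (see tensor.cpp below), and bridge::AtenFromXlaTensor wraps the result back into an at::Tensor. No device computation happens at this point.
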
torch_xla/csrc/aten_xla_type.h (+4, -0)

@@ -149,6 +149,10 @@ class AtenXlaType : public AtenXlaTypeBase {
at::Tensor& clamp_(at::Tensor& self, c10::optional<at::Scalar> min,
c10::optional<at::Scalar> max) const override;

at::Tensor ceil(const at::Tensor& self) const override;

at::Tensor floor(const at::Tensor& self) const override;

int64_t size(const at::Tensor& self, int64_t dim) const override;

at::Tensor slice(const at::Tensor& self, int64_t dim, int64_t start,
torch_xla/csrc/ops/ops.cpp (+2, -0)

@@ -57,6 +57,8 @@ PTXLA_UNARY_OP(Log1p, at::aten::log1p, xla::Log1p);
PTXLA_UNARY_OP(Erf, at::aten::erf, xla::Erf);
PTXLA_UNARY_OP(Erfc, at::aten::erfc, xla::Erfc);
PTXLA_UNARY_OP(Sqrt, at::aten::sqrt, xla::Sqrt);
PTXLA_UNARY_OP(Ceil, at::aten::ceil, xla::Ceil);
PTXLA_UNARY_OP(Floor, at::aten::floor, xla::Floor);

PTXLA_BINARY_OP(Min, at::aten::min, xla::Min);
PTXLA_BINARY_OP(Max, at::aten::max, xla::Max);
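PTXLA_UNARY_OP generates both the IR node constructor declared in ops.h and its XLA lowering, so one line per op is all the ceil and floor additions need here. Roughly, each invocation expands to something like the sketch below; the real macro is defined earlier in ops.cpp and its exact parameter types may differ.

// Hypothetical expansion of PTXLA_UNARY_OP(Ceil, at::aten::ceil, xla::Ceil):
NodePtr Ceil(const Value& input) {
  auto lower_fn = [](const Node& node, LoweringContext* loctx) -> XlaOpVector {
    // Fetch the already-lowered XLA operand for this node's single input.
    xla::XlaOp xla_input = loctx->GetOutputOp(node.operand(0));
    return node.ReturnOp(xla::Ceil(xla_input), loctx);  // emit the XLA op
  };
  return GenericOp(OpKind(at::aten::ceil), OpList{input}, input.shape(),
                   std::move(lower_fn));
}
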
torch_xla/csrc/ops/ops.h (+4, -0)

@@ -96,6 +96,10 @@ NodePtr Sigmoid(const Value& input);
NodePtr Clamp(const Value& input, c10::optional<at::Scalar> min,
c10::optional<at::Scalar> max);

NodePtr Ceil(const Value& input);

NodePtr Floor(const Value& input);

NodePtr AddMatMulOp(const Value& input, const Value& weight, const Value& bias,
bool use_full_conv_precision);

torch_xla/csrc/tensor.cpp (+8, -0)

@@ -854,6 +854,14 @@ void XLATensor::clamp_(XLATensor& input, c10::optional<at::Scalar> min,
input.SetIrValue(ir::ops::Clamp(input.GetIrValue(), min, max));
}

XLATensor XLATensor::ceil(const XLATensor& input) {
return Create(ir::ops::Ceil(input.GetIrValue()), input.GetDevice());
}

XLATensor XLATensor::floor(const XLATensor& input) {
return Create(ir::ops::Floor(input.GetIrValue()), input.GetDevice());
}

XLATensor XLATensor::slice(const XLATensor& input, xla::int64 dim,
xla::int64 start, xla::int64 end, xla::int64 step) {
return Create(
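As with the neighboring unary entries, XLATensor::ceil and XLATensor::floor only record an IR node on the input's device; nothing executes until the lazy graph is compiled and run. The end-to-end path for a user call, as a sketch:

// at::ceil(xla_a)                   ATen dispatches on the XLA tensor type
//   -> AtenXlaType::ceil(self)      aten_xla_type.cpp above
//     -> XLATensor::ceil(input)     creates the ir::ops::Ceil node here
//       -> xla::Ceil(op)            emitted later, when the graph is lowered
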
torch_xla/csrc/tensor.h (+4, -0)

@@ -254,6 +254,10 @@ class XLATensor {
static void clamp_(XLATensor& input, c10::optional<at::Scalar> min,
c10::optional<at::Scalar> max);

static XLATensor ceil(const XLATensor& input);

static XLATensor floor(const XLATensor& input);

static XLATensor slice(const XLATensor& input, xla::int64 dim,
xla::int64 start, xla::int64 end, xla::int64 step);
