From f560cfc090377b402b1fbbfafca15e393091e019 Mon Sep 17 00:00:00 2001
From: Davide Libenzi
Date: Mon, 11 Feb 2019 13:04:50 -0800
Subject: [PATCH] Use dereference operator in IR Values.

---
 torch_xla/csrc/ir.cpp                      |  4 ++--
 torch_xla/csrc/ops/avg_pool2d.cpp          |  2 +-
 torch_xla/csrc/ops/avg_pool2d_backward.cpp |  2 +-
 torch_xla/csrc/ops/conv2d.cpp              |  2 +-
 torch_xla/csrc/ops/conv2d_backward.cpp     |  2 +-
 torch_xla/csrc/ops/cross_replica_sum.cpp   |  2 +-
 torch_xla/csrc/ops/max_pool2d.cpp          |  2 +-
 torch_xla/csrc/ops/max_pool2d_backward.cpp |  2 +-
 torch_xla/csrc/ops/ops.cpp                 | 12 ++++++------
 torch_xla/csrc/ops/softmax.cpp             |  2 +-
 torch_xla/csrc/ops/threshold.cpp           |  2 +-
 torch_xla/csrc/ops/threshold_backward.cpp  |  2 +-
 torch_xla/csrc/ops/view.cpp                |  2 +-
 13 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/torch_xla/csrc/ir.cpp b/torch_xla/csrc/ir.cpp
index 86de3e690839..84e488c28e6e 100644
--- a/torch_xla/csrc/ir.cpp
+++ b/torch_xla/csrc/ir.cpp
@@ -47,8 +47,8 @@ Node::Node(OpKind op, OpList operands, xla::Shape shape, size_t num_outputs,
       hash_(xla::util::HashCombine(op_.hash(), hash_seed)) {
   for (auto& operand : operands) {
     AddOperand(operand.node, operand.index);
-    graph_size_ += operand.node->graph_size();
-    hash_ = xla::util::HashCombine(hash_, operand.node->hash());
+    graph_size_ += operand->graph_size();
+    hash_ = xla::util::HashCombine(hash_, operand->hash());
   }
 }
 
diff --git a/torch_xla/csrc/ops/avg_pool2d.cpp b/torch_xla/csrc/ops/avg_pool2d.cpp
index 8b5a3cec026d..7cc165f155fb 100644
--- a/torch_xla/csrc/ops/avg_pool2d.cpp
+++ b/torch_xla/csrc/ops/avg_pool2d.cpp
@@ -26,7 +26,7 @@ xla::Shape NodeOutputShape(
     return BuildAvgPool2d(operands[0], kernel_size, stride, padding,
                           count_include_pad);
   };
-  return InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+  return InferOutputShape({input->shape()}, lower_for_shape_fn);
 }
 
 }  // namespace
diff --git a/torch_xla/csrc/ops/avg_pool2d_backward.cpp b/torch_xla/csrc/ops/avg_pool2d_backward.cpp
index f45a26011312..1ffefe6ef4ba 100644
--- a/torch_xla/csrc/ops/avg_pool2d_backward.cpp
+++ b/torch_xla/csrc/ops/avg_pool2d_backward.cpp
@@ -26,7 +26,7 @@ xla::Shape NodeOutputShape(
         /*input=*/operands[1], kernel_size, stride, padding,
         count_include_pad);
   };
-  return InferOutputShape({grad_output.node->shape(), input.node->shape()},
+  return InferOutputShape({grad_output->shape(), input->shape()},
                           lower_for_shape_fn);
 }
 
diff --git a/torch_xla/csrc/ops/conv2d.cpp b/torch_xla/csrc/ops/conv2d.cpp
index 03311aaad3a0..9cdadeaa39dd 100644
--- a/torch_xla/csrc/ops/conv2d.cpp
+++ b/torch_xla/csrc/ops/conv2d.cpp
@@ -26,7 +26,7 @@ xla::Shape NodeOutputShape(
     return BuildConvolution(operands[0], operands[1], stride, padding,
                             xla::PrecisionConfig::DEFAULT);
   };
-  return InferOutputShape({input.node->shape(), weight.node->shape()},
+  return InferOutputShape({input->shape(), weight->shape()},
                           lower_for_shape_fn);
 }
 
diff --git a/torch_xla/csrc/ops/conv2d_backward.cpp b/torch_xla/csrc/ops/conv2d_backward.cpp
index d1e6d09ce496..a289c675804c 100644
--- a/torch_xla/csrc/ops/conv2d_backward.cpp
+++ b/torch_xla/csrc/ops/conv2d_backward.cpp
@@ -29,7 +29,7 @@ xla::Shape NodeOutputShape(
         {grads.grad_input, grads.grad_weight, grads.grad_bias});
   };
   return InferOutputShape(
-      {grad_output.node->shape(), input.node->shape(), weight.node->shape()},
+      {grad_output->shape(), input->shape(), weight->shape()},
       lower_for_shape_fn);
 }
 
diff --git a/torch_xla/csrc/ops/cross_replica_sum.cpp b/torch_xla/csrc/ops/cross_replica_sum.cpp
index 827cdcd52359..058f0778a0dd 100644
--- a/torch_xla/csrc/ops/cross_replica_sum.cpp
+++ b/torch_xla/csrc/ops/cross_replica_sum.cpp
@@ -10,7 +10,7 @@ namespace ops {
 
 CrossReplicaSum::CrossReplicaSum(const Value& operand,
                                  std::vector<std::vector<xla::int64>> groups)
-    : Node(xla_cross_replica_sum, {operand}, operand.node->shape(),
+    : Node(xla_cross_replica_sum, {operand}, operand->shape(),
            /*num_outputs=*/1, xla::util::MHash(groups)),
       groups_(std::move(groups)) {}
 
diff --git a/torch_xla/csrc/ops/max_pool2d.cpp b/torch_xla/csrc/ops/max_pool2d.cpp
index 9bdd3bea7ac2..8736169b9db2 100644
--- a/torch_xla/csrc/ops/max_pool2d.cpp
+++ b/torch_xla/csrc/ops/max_pool2d.cpp
@@ -24,7 +24,7 @@ xla::Shape NodeOutputShape(
         << "Unexpected number of operands: " << operands.size();
     return BuildMaxPool2d(operands[0], kernel_size, stride, padding);
   };
-  return InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+  return InferOutputShape({input->shape()}, lower_for_shape_fn);
 }
 
 }  // namespace
diff --git a/torch_xla/csrc/ops/max_pool2d_backward.cpp b/torch_xla/csrc/ops/max_pool2d_backward.cpp
index f106c6f476ca..28c1eaf6d01e 100644
--- a/torch_xla/csrc/ops/max_pool2d_backward.cpp
+++ b/torch_xla/csrc/ops/max_pool2d_backward.cpp
@@ -24,7 +24,7 @@ xla::Shape NodeOutputShape(
         /*input=*/operands[1], kernel_size, stride, padding);
   };
 
-  return InferOutputShape({grad_output.node->shape(), input.node->shape()},
+  return InferOutputShape({grad_output->shape(), input->shape()},
                           lower_for_shape_fn);
 }
 
diff --git a/torch_xla/csrc/ops/ops.cpp b/torch_xla/csrc/ops/ops.cpp
index 726c9ce54904..1401aa785ddd 100644
--- a/torch_xla/csrc/ops/ops.cpp
+++ b/torch_xla/csrc/ops/ops.cpp
@@ -24,7 +24,7 @@ NodePtr ReluOp(const Value& input) {
     return BuildRelu(operands[0]);
   };
   xla::Shape output_shape =
-      ir::ops::InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+      ir::ops::InferOutputShape({input->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::relu), ir::OpList{input},
                             output_shape, std::move(lower_fn));
 }
@@ -42,7 +42,7 @@ NodePtr TransposeOp(const Value& input) {
     return xla::Transpose(operands[0], {1, 0});
   };
   xla::Shape output_shape =
-      ir::ops::InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+      ir::ops::InferOutputShape({input->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::t), ir::OpList{input},
                             output_shape, std::move(lower_fn));
 }
@@ -78,7 +78,7 @@ NodePtr AddMatMulOp(const Value& input, const Value& weight, const Value& bias,
     return xla::Dot(operands[0], operands[1]);
   };
   xla::Shape output_shape = ir::ops::InferOutputShape(
-      {input.node->shape(), weight.node->shape()}, lower_for_shape_fn);
+      {input->shape(), weight->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::addmm),
                             ir::OpList{input, weight, bias}, output_shape,
                             std::move(lower_fn));
@@ -106,7 +106,7 @@ NodePtr MatMulOp(const Value& input, const Value& weight,
     return xla::Dot(operands[0], operands[1]);
   };
   xla::Shape output_shape = ir::ops::InferOutputShape(
-      {input.node->shape(), weight.node->shape()}, lower_for_shape_fn);
+      {input->shape(), weight->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::mm), ir::OpList{input, weight},
                             output_shape, std::move(lower_fn));
 }
@@ -125,7 +125,7 @@ NodePtr NllLossOp(const Value& logits, const Value& labels) {
     return BuildNllLoss(/*logits=*/operands[0], /*labels=*/operands[1]);
   };
   xla::Shape output_shape = ir::ops::InferOutputShape(
-      {logits.node->shape(), labels.node->shape()}, lower_for_shape_fn);
+      {logits->shape(), labels->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::nll_loss),
                             ir::OpList{logits, labels}, output_shape,
                             std::move(lower_fn));
@@ -145,7 +145,7 @@ NodePtr NllLossBackwardOp(const Value& logits, const Value& labels) {
     return BuildNllLossBackward(/*logits=*/operands[0], /*labels=*/operands[1]);
   };
   xla::Shape output_shape = ir::ops::InferOutputShape(
-      {logits.node->shape(), labels.node->shape()}, lower_for_shape_fn);
+      {logits->shape(), labels->shape()}, lower_for_shape_fn);
   return ir::ops::GenericOp(ir::OpKind(at::aten::nll_loss_backward),
                             ir::OpList{logits, labels}, output_shape,
                             std::move(lower_fn));
diff --git a/torch_xla/csrc/ops/softmax.cpp b/torch_xla/csrc/ops/softmax.cpp
index ffc7367b95eb..8ac4e8ae82bb 100644
--- a/torch_xla/csrc/ops/softmax.cpp
+++ b/torch_xla/csrc/ops/softmax.cpp
@@ -19,7 +19,7 @@ xla::Shape NodeOutputShape(const Value& input, xla::int64 dim) {
         << "Unexpected number of operands: " << operands.size();
     return BuildLogSoftmax(operands[0], dim);
   };
-  return InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+  return InferOutputShape({input->shape()}, lower_for_shape_fn);
 }
 
 }  // namespace
diff --git a/torch_xla/csrc/ops/threshold.cpp b/torch_xla/csrc/ops/threshold.cpp
index e51cb91c8afb..149ef6b30b67 100644
--- a/torch_xla/csrc/ops/threshold.cpp
+++ b/torch_xla/csrc/ops/threshold.cpp
@@ -8,7 +8,7 @@ namespace ir {
 namespace ops {
 
 Threshold::Threshold(const Value& input, float threshold, float value)
-    : Node(ir::OpKind(at::aten::threshold), {input}, input.node->shape(),
+    : Node(ir::OpKind(at::aten::threshold), {input}, input->shape(),
            /*num_outputs=*/1, xla::util::MHash(threshold, value)),
       threshold_(threshold),
       value_(value) {}
diff --git a/torch_xla/csrc/ops/threshold_backward.cpp b/torch_xla/csrc/ops/threshold_backward.cpp
index 72930d667121..fbf4c4b02d9a 100644
--- a/torch_xla/csrc/ops/threshold_backward.cpp
+++ b/torch_xla/csrc/ops/threshold_backward.cpp
@@ -10,7 +10,7 @@ namespace ops {
 ThresholdBackward::ThresholdBackward(const Value& grad_output,
                                      const Value& input, float threshold)
     : Node(ir::OpKind(at::aten::threshold_backward), {grad_output, input},
-           input.node->shape(), /*num_outputs=*/1, xla::util::MHash(threshold)),
+           input->shape(), /*num_outputs=*/1, xla::util::MHash(threshold)),
       threshold_(threshold) {}
 
 XlaOpVector ThresholdBackward::Lower(LoweringContext* loctx) const {
diff --git a/torch_xla/csrc/ops/view.cpp b/torch_xla/csrc/ops/view.cpp
index 2673080260f2..68f7bb431d80 100644
--- a/torch_xla/csrc/ops/view.cpp
+++ b/torch_xla/csrc/ops/view.cpp
@@ -22,7 +22,7 @@ xla::Shape NodeOutputShape(
         << "Unexpected number of operands: " << operands.size();
     return BuildView(operands[0], output_sizes);
   };
-  return InferOutputShape({input.node->shape()}, lower_for_shape_fn);
+  return InferOutputShape({input->shape()}, lower_for_shape_fn);
 }
 
 }  // namespace
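
Note: every hunk above rewrites value.node->member() as value->member(), which
relies on Value exposing a dereference operator that forwards to its underlying
Node. That header change is not part of this diff. The following is a minimal
sketch of the assumed shape of Value, taking NodePtr to be a shared pointer to
Node (both names appear in the hunks above; the exact member layout here is an
assumption, not the actual ir.h contents):

    // Hypothetical sketch -- not part of this patch.
    struct Value {
      Value() = default;
      Value(NodePtr node, size_t index = 0)
          : node(std::move(node)), index(index) {}

      // Forwards member access to the underlying Node, so call sites can
      // write value->shape() instead of value.node->shape().
      Node* operator->() const { return node.get(); }

      NodePtr node;
      size_t index = 0;
    };

With such an operator, operand->shape() and operand.node->shape() are
equivalent; the patch keeps the explicit .node only where the smart pointer
itself is needed, as in AddOperand(operand.node, operand.index) in ir.cpp.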