torch_xla/csrc/convolution.cpp (1 change: 0 additions & 1 deletion)

@@ -271,7 +271,6 @@ xla::XlaOp BuildConvolutionBias(
   CHECK_GE(node_inputs.size(), size_t(4));
   const auto window_strides = XlaHelpers::I64List(
       node->get<std::vector<int64_t>>(attr::stride).value());
-  const auto node_outputs = node->outputs();
   const auto conv = BuildConvolution(node, input, kernel, conv_precision);
   auto broadcast_sizes = XlaHelpers::SizesOfXlaOp(conv);
   CHECK_EQ(broadcast_sizes.size(), 4);
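This hunk, and the data_ops.cpp, log_softmax.cpp, and nll_loss.cpp hunks below, all make the same kind of change: each deleted line bound a value that was never read afterwards, i.e. dead code. A minimal sketch of why such bindings deserve deletion; Node and BuildSomething are hypothetical stand-ins here, not the torch_xla types:

#include <vector>

// Hypothetical stand-in for the real node type.
struct Node {
  std::vector<int> outputs() const { return {0, 1}; }
};

int BuildSomething(const Node* node) {
  // Dead binding: never read below. Deleting it drops a needless vector
  // copy on every call. -Wunused-variable reliably flags unused scalars,
  // but often stays silent for class types like this one (their
  // constructors could have side effects), so this cleanup takes a human.
  const auto node_outputs = node->outputs();
  return 1;
}

int main() {
  Node node;
  return BuildSomething(&node) == 1 ? 0 : 1;
}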
torch_xla/csrc/data_ops.cpp (1 change: 0 additions & 1 deletion)

@@ -173,7 +173,6 @@ xla::XlaOp BuildCat(const Node* node,
 }

 std::vector<xla::XlaOp> BuildChunk(const Node* node, const xla::XlaOp& input) {
-  const auto node_input = node->inputs()[0];
   int64_t chunks = node->get<int64_t>(attr::chunks).value();
   int64_t dim = node->get<int64_t>(attr::dim).value();
   XLA_CHECK_GE(dim, 0) << "Negative dimension specified for chunk operator";
torch_xla/csrc/helpers.cpp (1 change: 1 addition & 0 deletions)

@@ -79,6 +79,7 @@ xla::PrimitiveType XlaHelpers::MakeXlaPrimitiveType(
       return xla::PrimitiveType::S64;
     default:
       LOG(FATAL) << "Type not supported: " << scalar_type;
+      return xla::PrimitiveType::PRIMITIVE_TYPE_INVALID;
   }
 }
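The added return is unreachable at runtime, since LOG(FATAL) aborts the process; its job is to satisfy compilers that cannot see that the macro never returns and would otherwise warn that control reaches the end of a non-void function. A self-contained sketch of the pattern, with FatalLog and the enums as simplified stand-ins for LOG(FATAL) and the XLA types:

#include <cstdlib>
#include <iostream>

enum class ScalarType { Long, Other };
enum class PrimitiveType { PRIMITIVE_TYPE_INVALID, S64 };

// Aborts, but is not declared [[noreturn]], so the compiler must assume it
// can return; many LOG(FATAL) expansions look the same way to the compiler.
void FatalLog(const char* msg) {
  std::cerr << msg << '\n';
  std::abort();
}

PrimitiveType MakePrimitiveType(ScalarType t) {
  switch (t) {
    case ScalarType::Long:
      return PrimitiveType::S64;
    default:
      FatalLog("Type not supported");
      // Unreachable, but without it -Wreturn-type warns that control can
      // reach the end of a non-void function.
      return PrimitiveType::PRIMITIVE_TYPE_INVALID;
  }
}

int main() {
  return MakePrimitiveType(ScalarType::Long) == PrimitiveType::S64 ? 0 : 1;
}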
torch_xla/csrc/log_softmax.cpp (1 change: 0 additions & 1 deletion)

@@ -58,7 +58,6 @@ xla::XlaOp BuildLogSoftmaxGrad(const Node* node, const xla::XlaOp& grad_output,
   // Inspired from tf2xla.
   xla::int64 dim = node->get<int64_t>(attr::dim).value();

-  const auto node_inputs = node->inputs();
   auto input_size = XlaHelpers::SizesOfXlaOp(grad_output);
   std::vector<xla::int64> broadcast_dimensions;
   for (size_t broadcast_dim = 0; broadcast_dim < input_size.size();
torch_xla/csrc/nll_loss.cpp (4 changes: 0 additions & 4 deletions)

@@ -79,10 +79,6 @@ xla::XlaOp BuildNllLossBackward(const Node* node, const xla::XlaOp& logits,
   const int kBatchDim = 0;
   auto builder = logits.builder();
   const auto logits_shape = XlaHelpers::ShapeOfXlaOp(logits);
-  const auto zero =
-      XlaHelpers::ScalarValue<float>(0, logits_shape.element_type(), builder);
-  const auto one =
-      XlaHelpers::ScalarValue<float>(1, logits_shape.element_type(), builder);
   xla::XlaOp one_hot_labels = LabelsToOneHot(
       /*builder=*/builder,
       /*depth=*/logits_shape.dimensions(1),
torch_xla/csrc/torch_util.cpp (2 changes: 1 addition & 1 deletion)

@@ -27,7 +27,7 @@ py::object XlaPackTensorList(const XlaModule::TensorBatchVector& outputs) {
     }
     tuple[i] = replica_tuple;
   }
-  return tuple;
+  return std::move(tuple);
 }

 } // namespace jit
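One note on the torch_util.cpp change: tuple is a py::tuple while the function returns py::object, so the return crosses a derived-to-base conversion. Under the pre-C++20 rules, a plain return tuple; does not qualify for implicit move in that situation and copy-constructs the py::object, which for pybind11 handles means an extra Python reference-count round trip; std::move selects the move constructor instead. A minimal sketch of the language rule, using stand-in types rather than pybind11:

#include <iostream>
#include <utility>

struct Base {
  Base() = default;
  Base(const Base&) { std::cout << "copy\n"; }
  Base(Base&&) noexcept { std::cout << "move\n"; }
};
struct Derived : Base {};

Base ReturnPlain() {
  Derived d;
  return d;  // pre-C++20: the derived-to-base conversion copies
}

Base ReturnMoved() {
  Derived d;
  return std::move(d);  // binds Derived&& to Base&&: moves
}

int main() {
  ReturnPlain();  // prints "copy" under a strict C++11/14/17 compiler
  ReturnMoved();  // prints "move"
}

C++20 (and newer compilers that apply that fix retroactively) performs the implicit move here as well, which is why return std::move(local); is sometimes flagged as redundant today; at the time of this change it was the straightforward way to avoid the copy.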