ClangTidy - Readability cleanup: code-findings-fixes.
* unused using-declarations
* redundant string conversions
* C-style casts
* redundant get() call on smart pointer
* the 'empty' method should be used to check for emptiness instead of 'size'

PiperOrigin-RevId: 196585984
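For readers unfamiliar with these clang-tidy checks, the sketch below shows the shape of each rewrite. It is a minimal, hypothetical example (the names `Widget` and `MakeLabel` are invented, and `assert` stands in for `CHECK`); it illustrates the finding categories above rather than reproducing code from this commit.

```cpp
// Illustrative before/after pairs for each finding category. Hypothetical
// example code, not taken from the files changed in this commit.
#include <cassert>
#include <memory>
#include <string>
#include <vector>

struct Widget {
  int id = 42;
};

// unused using-declarations: clang-tidy flags a declaration such as
//   using std::list;
// when nothing in the file refers to it; the fix is simply to delete it.

// redundant string conversions: wrapping a value that already is a
// std::string in std::string(...) only adds a copy.
std::string MakeLabel(const std::string& name) {
  // Before: return std::string(name) + "-label";
  return name + "-label";
}

int main() {
  // C-style casts: prefer static_cast, which the compiler can check.
  double ratio = 0.75;
  // Before: int percent = (int)(ratio * 100);
  int percent = static_cast<int>(ratio * 100);

  // redundant get() call on smart pointer: std::unique_ptr is contextually
  // convertible to bool, so .get() is noise in boolean positions.
  std::unique_ptr<Widget> w(new Widget);
  // Before: assert(w.get());
  assert(w);

  // 'empty' instead of 'size': empty() states the intent directly and is
  // guaranteed O(1) for every standard container.
  std::vector<int> ids;
  // Before: if (ids.size() == 0) { ... }
  if (ids.empty()) {
    ids.push_back(w->id + percent);
  }

  assert(MakeLabel("stream") == "stream-label");
  return 0;
}
```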
tensorflower-gardener committed May 14, 2018
1 parent 5334631 commit f94d60a
Showing 14 changed files with 21 additions and 32 deletions.
2 changes: 0 additions & 2 deletions tensorflow/cc/gradients/math_grad_test.cc
@@ -31,7 +31,6 @@ using ops::AddN;
 using ops::BatchMatMul;
 using ops::Const;
 using ops::Div;
-using ops::Greater;
 using ops::MatMul;
 using ops::Max;
 using ops::Maximum;
@@ -46,7 +45,6 @@ using ops::RealDiv;
 using ops::SquaredDifference;
 using ops::Sub;
 using ops::Sum;
-using ops::Where3;

 // TODO(andydavis) Test gradient function against numeric gradients output.
 // TODO(andydavis) As more gradients are added move common test functions
5 changes: 0 additions & 5 deletions tensorflow/compiler/xla/service/gpu/convolution_thunk.cc
@@ -29,11 +29,6 @@ namespace xla {
 namespace gpu {

 using se::dnn::AlgorithmDesc;
-using se::dnn::BatchDescriptor;
-using se::dnn::ConvolutionDescriptor;
-using se::dnn::DataLayout;
-using se::dnn::FilterDescriptor;
-using se::dnn::FilterLayout;

 ConvolutionThunk::ConvolutionThunk(
     CudnnConvKind convolution_kind, const BufferAllocation::Slice& input_buffer,
1 change: 0 additions & 1 deletion tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -52,7 +52,6 @@ namespace xla {
 namespace {

 using tensorflow::gtl::ArraySlice;
-using tensorflow::gtl::FlatSet;

 template <typename OperandT>
 StatusOr<std::unique_ptr<Literal>> Compare(const Shape& shape, HloOpcode opcode,
1 change: 0 additions & 1 deletion tensorflow/compiler/xla/tests/literal_test_util.cc
@@ -43,7 +43,6 @@ namespace xla {
 using ::tensorflow::strings::Appendf;
 using ::tensorflow::strings::Printf;
 using ::tensorflow::strings::StrAppend;
-using ::tensorflow::strings::StrCat;

 /* static */ ::testing::AssertionResult LiteralTestUtil::EqualShapes(
     const Shape& expected, const Shape& actual) {
10 changes: 5 additions & 5 deletions tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
@@ -119,7 +119,7 @@ TEST(EventMgr, DelayedPolling) {
   EXPECT_EQ(0, th.queue_size());
   TensorReferenceVector* v = nullptr;
   std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
-  CHECK(stream.get());
+  CHECK(stream);
   stream->Init();
   for (int i = 0; i < 5; ++i) {
     v = new TensorReferenceVector;
@@ -151,7 +151,7 @@ TEST(EventMgr, FlushLargeTensorImmediately) {
   TEST_EventMgrHelper th(&em);
   EXPECT_EQ(0, live_tensor_bytes);
   std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
-  CHECK(stream.get());
+  CHECK(stream);
   stream->Init();
   for (int i = 0; i < 5; ++i) {
     TensorReferenceVector v;
@@ -168,7 +168,7 @@ TEST(EventMgr, ManySmallTensorsFlushedImmediately) {
   TEST_EventMgrHelper th(&em);
   EXPECT_EQ(0, live_tensor_bytes);
   std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
-  CHECK(stream.get());
+  CHECK(stream);
   stream->Init();
   for (int i = 0; i < 5; ++i) {
     TensorReferenceVector v;
@@ -209,7 +209,7 @@ TEST(EventMgr, ManySmallTensorsSeparateCallsFlushed) {
   TEST_EventMgrHelper th(&em);
   EXPECT_EQ(0, live_tensor_bytes);
   std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
-  CHECK(stream.get());
+  CHECK(stream);
   stream->Init();
   for (int i = 0; i < 5; ++i) {
     for (int i = 0; i < 1000; i++) {
@@ -232,7 +232,7 @@ TEST(EventMgr, NonEmptyShutdown) {
   EXPECT_EQ(0, th.queue_size());
   EXPECT_EQ(0, th.free_size());
   std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
-  CHECK(stream.get());
+  CHECK(stream);
   stream->Init();
   for (int i = 0; i < 5; ++i) {
     TensorReferenceVector* v = new TensorReferenceVector;
@@ -577,7 +577,7 @@ void GrpcWorker::LoggingAsync(const LoggingRequest* request,
                               LoggingResponse* response, StatusCallback done) {
   auto env = this->env();
   if (env) {
-    auto session_mgr = (SessionMgr*)env->session_mgr;
+    auto session_mgr = env->session_mgr;
     if (session_mgr) {
       session_mgr->SetLogging(request->rpc_logging());
       for (const auto& step_id : request->fetch_step_id()) {
2 changes: 1 addition & 1 deletion tensorflow/core/distributed_runtime/session_mgr.cc
@@ -67,7 +67,7 @@ Status SessionMgr::CreateSession(const string& session,
     worker_name = WorkerNameFromServerDef(server_def);
   }

-  if (worker_cache != nullptr && default_worker_cache_.get() != nullptr) {
+  if (worker_cache != nullptr && default_worker_cache_ != nullptr) {
     worker_cache->SetLogging(this->is_logging_active_);
   }

@@ -67,7 +67,7 @@ Status WorkerCachePartial::RefreshDeviceStatus(const string& device_name) {
   };
   std::unique_ptr<WorkerInterface, decltype(deleter)> rwi(CreateWorker(task),
                                                           deleter);
-  if (s.ok() && !rwi.get()) {
+  if (s.ok() && !rwi) {
     s = errors::Internal("RefreshDeviceStatus, unknown worker task: ", task);
   }

6 changes: 3 additions & 3 deletions tensorflow/core/grappler/optimizers/loop_optimizer.cc
@@ -390,7 +390,7 @@ Status LoopInvariantNodeMotionOptimizer::Optimize() {
       frame_children_[frame_ids[0]].insert(frame_ids[1]);
       frame_parent_[frame_ids.back()] = frame_ids[frame_ids.size() - 2];
     }
-    if (frame_ids.size() >= 1) {
+    if (!frame_ids.empty()) {
      frame_children_.insert(std::make_pair(frame_ids.back(), empty_set_));
      if (node->op() == "LoopCond") {
        if (loop_cond_.count(frame_ids.back())) {
@@ -409,7 +409,7 @@ Status LoopInvariantNodeMotionOptimizer::Optimize() {
   }

   for (auto it = frame_children_.begin(); it != frame_children_.end(); ++it) {
-    if (it->second.size() == 0) {
+    if (it->second.empty()) {
       worklist.push_back(it->first);
     }
   }
@@ -422,7 +422,7 @@ Status LoopInvariantNodeMotionOptimizer::Optimize() {
     if (parent_it != frame_parent_.end()) {
       int parent_id = parent_it->second;
       frame_children_[parent_id].erase(frame_id);
-      if (frame_children_[parent_id].size() == 0) {
+      if (frame_children_[parent_id].empty()) {
        worklist.push_back(parent_id);
      }
    }
13 changes: 6 additions & 7 deletions tensorflow/core/kernels/cudnn_rnn_ops.cc
@@ -1411,7 +1411,7 @@ class CudnnRNNForwardOpV2<GPUDevice, T>
       CudnnRnnAllocatorInTemp<T> reserve_space_allocator(context);
       CudnnRnnAllocatorInTemp<uint8> workspace_allocator(context);
       status = DoForward<T>(
-          context, *rnn_desc.get(), model_types(), model_shapes, input, input_h,
+          context, *rnn_desc, model_types(), model_shapes, input, input_h,
           input_c, params, is_training(), output, output_h, output_c,
           &reserve_space_allocator, &workspace_allocator, &fwd_profile_result);
       if (!status.ok()) {
@@ -1422,12 +1422,11 @@ class CudnnRNNForwardOpV2<GPUDevice, T>
       // Get reserve space from the forward pass.
       Tensor reserve_space = reserve_space_allocator.get_allocated_tensor(0);
       status = DoBackward<T>(
-          context, *rnn_desc.get(), model_types(), model_shapes, input,
-          input_h, input_c, params, output, output_h, output_c,
-          &output_backprop, &output_h_backprop, &output_c_backprop,
-          &reserve_space, &input_backprop, &input_h_backprop,
-          &input_c_backprop, &params_backprop, &workspace_allocator,
-          &bak_profile_result);
+          context, *rnn_desc, model_types(), model_shapes, input, input_h,
+          input_c, params, output, output_h, output_c, &output_backprop,
+          &output_h_backprop, &output_c_backprop, &reserve_space,
+          &input_backprop, &input_h_backprop, &input_c_backprop,
+          &params_backprop, &workspace_allocator, &bak_profile_result);
       if (!status.ok()) {
         continue;
       }
2 changes: 1 addition & 1 deletion tensorflow/core/kernels/ops_testutil.cc
@@ -24,7 +24,7 @@ namespace tensorflow {

 void OpsTestBase::SetDevice(const DeviceType& device_type,
                             std::unique_ptr<Device> device) {
-  CHECK(device_.get()) << "No device provided";
+  CHECK(device_) << "No device provided";
   device_type_ = device_type;
   device_ = std::move(device);
 #ifdef GOOGLE_CUDA
1 change: 0 additions & 1 deletion tensorflow/core/ops/rpc_ops.cc
@@ -18,7 +18,6 @@ limitations under the License.

 namespace tensorflow {

-using tensorflow::shape_inference::DimensionHandle;
 using tensorflow::shape_inference::InferenceContext;
 using tensorflow::shape_inference::ShapeHandle;

4 changes: 2 additions & 2 deletions tensorflow/core/platform/cloud/oauth_client.cc
@@ -97,7 +97,7 @@ Status CreateSignature(RSA* private_key, StringPiece to_sign,
   }
   std::unique_ptr<EVP_MD_CTX, std::function<void(EVP_MD_CTX*)>> md_ctx(
       EVP_MD_CTX_create(), [](EVP_MD_CTX* ptr) { EVP_MD_CTX_destroy(ptr); });
-  if (!md_ctx.get()) {
+  if (!md_ctx) {
     return errors::Internal("Could not create MD_CTX.");
   }

@@ -196,7 +196,7 @@ Status OAuthClient::GetTokenFromServiceAccountJson(
   std::unique_ptr<RSA, std::function<void(RSA*)>> private_key(
       PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, nullptr, nullptr),
       [](RSA* ptr) { RSA_free(ptr); });
-  if (!private_key.get()) {
+  if (!private_key) {
     return errors::Internal("Could not deserialize the private key.");
   }

2 changes: 1 addition & 1 deletion tensorflow/python/lib/core/ndarray_tensor.cc
@@ -145,7 +145,7 @@ Status PyBytesArrayMap(PyArrayObject* array, F f) {
   while (PyArray_ITER_NOTDONE(iter.get())) {
     auto item = tensorflow::make_safe(PyArray_GETITEM(
         array, static_cast<char*>(PyArray_ITER_DATA(iter.get()))));
-    if (!item.get()) {
+    if (!item) {
       return errors::Internal("Unable to get element from the feed - no item.");
     }
     char* ptr;
