Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Remove a few warnings on macOS #26675

Merged
merged 2 commits on Mar 21, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
4 changes: 2 additions & 2 deletions tensorflow/core/distributed_runtime/cancellable_call.h
Expand Up @@ -40,8 +40,8 @@ class CancellableCall {

void Start(const StatusCallback& done) {
CancellationToken token = cancel_mgr_->get_cancellation_token();
const bool not_yet_cancelled = cancel_mgr_->RegisterCallback(
token, [this, token]() { opts_.StartCancel(); });
const bool not_yet_cancelled =
cancel_mgr_->RegisterCallback(token, [this]() { opts_.StartCancel(); });
if (not_yet_cancelled) {
IssueCall([this, token, done](const Status& s) {
cancel_mgr_->DeregisterCallback(token);
Expand Down
Expand Up @@ -121,7 +121,7 @@ void CollectiveParamResolverDistributed::CompleteGroupAsync(
}
CompleteGroupDistributed(
cp.instance.device_names[0], &cp, cancel_mgr,
[this, response, done](const Status& s, const GroupRec* gr) {
[response, done](const Status& s, const GroupRec* gr) {
if (s.ok()) {
mutex_lock l(gr->mu);
response->set_group_key(gr->group.group_key);
Expand Down Expand Up @@ -159,7 +159,7 @@ void CollectiveParamResolverDistributed::CompleteInstanceAsync(
string* device = new string(request->device());
VLOG(1) << "New cp " << cp << " for device " << *device << " : "
<< cp->ToString();
StatusCallback done_and_cleanup = [this, cp, device, done](const Status& s) {
StatusCallback done_and_cleanup = [cp, device, done](const Status& s) {
done(s);
delete cp;
delete device;
Expand All @@ -180,8 +180,8 @@ void CollectiveParamResolverDistributed::CompleteInstanceAsync(
// retrieve it.
FindInstanceRec(
gr, cp,
[this, gr, cp, response, done_and_cleanup](
const Status& fi_status, InstanceRec* ir) {
[cp, response, done_and_cleanup](const Status& fi_status,
InstanceRec* ir) {
if (fi_status.ok()) {
mutex_lock l(ir->out_mu);
ir->WaitForOutMu(l);
Expand Down Expand Up @@ -352,11 +352,11 @@ void CollectiveParamResolverDistributed::UpdateInstanceCache(
delete irp;
};

FindInstanceRec(
gr, cp, [this, irp, continue_with_ir](const Status s, InstanceRec* irec) {
*irp = irec;
continue_with_ir(s);
});
FindInstanceRec(gr, cp,
[irp, continue_with_ir](const Status s, InstanceRec* irec) {
*irp = irec;
continue_with_ir(s);
});
}

void CollectiveParamResolverDistributed::CompleteInstanceDistributed(
Expand Down
Expand Up @@ -65,8 +65,7 @@ void PopulateTensorFromExtra(const RecvBufRespExtra& extra,
Tensor* cpu_tensor) {
char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));
for (const auto& tensor_content_chunk : extra.tensor_content()) {
memcpy(head, tensor_content_chunk.data(),
tensor_content_chunk.size());
memcpy(head, tensor_content_chunk.data(), tensor_content_chunk.size());
head += tensor_content_chunk.size();
}
}
Expand Down Expand Up @@ -136,7 +135,7 @@ void CollectiveRemoteAccessDistributed::RecvFromPeer(
nullptr /*send_dev_ctx*/, to_device_ctx, cpu_dev,
to_device, cpu_attr, to_alloc_attr, cpu_tensor,
to_tensor, dev_to_dev_stream_index,
[this, cpu_tensor, done](const Status& s) {
[cpu_tensor, done](const Status& s) {
delete cpu_tensor;
// This callback must not block, so execute
// done in another thread.
Expand Down
Expand Up @@ -196,8 +196,7 @@ class GrpcMasterService : public AsyncServiceInterface {
call->SetCancelCallback([call_opts]() { call_opts->StartCancel(); });
master_impl_->RunStep(
call_opts, wrapped_request, wrapped_response,
[call, call_opts, wrapped_request, wrapped_response,
trace](const Status& status) {
[call, call_opts, wrapped_request, trace](const Status& status) {
call->ClearCancelCallback();
delete call_opts;
delete wrapped_request;
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/core/distributed_runtime/rpc/grpc_state.h
Expand Up @@ -56,11 +56,11 @@ class RPCState : public GrpcClientCQTag {
: call_opts_(call_opts),
threadpool_(threadpool),
done_(std::move(done)),
timeout_in_ms_(timeout_in_ms),
max_retries_(max_retries),
cq_(cq),
stub_(stub),
method_(method),
max_retries_(max_retries),
timeout_in_ms_(timeout_in_ms),
fail_fast_(fail_fast) {
response_ = response;
::grpc::Status s = GrpcMaybeUnparseProto(request, &request_buf_);
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/core/framework/reader_base.cc
Expand Up @@ -202,7 +202,7 @@ string ReaderBase::GetNextWorkLocked(QueueInterface* queue,
string work;
Notification n;
queue->TryDequeue(
context, [this, context, &n, &work](const QueueInterface::Tuple& tuple) {
context, [context, &n, &work](const QueueInterface::Tuple& tuple) {
if (context->status().ok()) {
if (tuple.size() != 1) {
context->SetStatus(
Expand Down
2 changes: 0 additions & 2 deletions tensorflow/core/framework/tensor_util.h
Expand Up @@ -175,14 +175,12 @@ template <typename RealType>
struct CopyHelper<std::complex<RealType>> {
template <typename SrcIter>
static void ToArray(SrcIter begin, SrcIter end, std::complex<RealType>* dst) {
using SrcType = typename std::iterator_traits<SrcIter>::value_type;
RealType* real_dst = reinterpret_cast<RealType*>(dst);
std::copy(begin, end, real_dst);
}

template <typename SrcIter, typename DstIter>
static void FromArray(SrcIter begin, SrcIter end, DstIter dst) {
using DstType = typename std::iterator_traits<DstIter>::value_type;
size_t n = std::distance(begin, end);
const RealType* real_begin = reinterpret_cast<const RealType*>(&(*begin));
std::copy_n(real_begin, 2 * n, dst);
Expand Down
5 changes: 4 additions & 1 deletion tensorflow/core/kernels/data/BUILD
Expand Up @@ -70,7 +70,10 @@ cc_library(
name = "stats_utils",
srcs = ["stats_utils.cc"],
hdrs = ["stats_utils.h"],
deps = ["//tensorflow/core:lib"],
deps = [
"//tensorflow/core:lib",
"@com_google_absl//absl/base:core_headers",
],
)

cc_library(
Expand Down
Expand Up @@ -136,8 +136,8 @@ class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {

*output = new Dataset(ctx, input, func_, batch_size, num_parallel_calls,
drop_remainder, output_types_, output_shapes_,
std::move(captured_func), &ctx->eigen_cpu_device(),
std::move(map_func), preserve_cardinality_);
std::move(captured_func), std::move(map_func),
preserve_cardinality_);
}

private:
Expand All @@ -149,7 +149,6 @@ class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
std::unique_ptr<CapturedFunction> captured_func,
const Eigen::ThreadPoolDevice* device,
MapAndBatchIteratorFunction map_func, bool preserve_cardinality)
: DatasetBase(DatasetContext(ctx)),
input_(input),
Expand All @@ -160,7 +159,6 @@ class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {
output_types_(output_types),
output_shapes_(output_shapes),
captured_func_(std::move(captured_func)),
device_(device),
map_func_(std::move(map_func)),
preserve_cardinality_(preserve_cardinality) {
input_->Ref();
Expand Down Expand Up @@ -263,8 +261,7 @@ class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {
max_batch_results_(std::min(kMaxBatchResults,
(params.dataset->num_parallel_calls_ +
params.dataset->batch_size_ - 1) /
params.dataset->batch_size_)) {
}
params.dataset->batch_size_)) {}

~Iterator() override {
mutex_lock l(*mu_);
Expand Down Expand Up @@ -815,7 +812,6 @@ class MapAndBatchDatasetOp : public UnaryDatasetOpKernel {
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const std::unique_ptr<CapturedFunction> captured_func_;
const Eigen::ThreadPoolDevice* device_; // not owned
const MapAndBatchIteratorFunction map_func_;
const bool preserve_cardinality_;
};
Expand Down
1 change: 1 addition & 0 deletions tensorflow/core/kernels/data/stats_utils.cc
Expand Up @@ -14,6 +14,7 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/core/kernels/data/stats_utils.h"

#include "absl/base/attributes.h"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Internally, I am seeing an error that indicates that tensorflow/core/kernels/data:stats_utils does not depend on a module exporting third_party/absl/base/attributes.h. Could you please fix that? Thanks.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should be done.

#include "tensorflow/core/lib/strings/strcat.h"

namespace tensorflow {
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/core/kernels/topk_op.cc
Expand Up @@ -22,14 +22,14 @@ limitations under the License.
#include <algorithm>
#include <numeric>
#include <vector>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/top_n.h"
#include "tensorflow/core/util/work_sharder.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

namespace tensorflow {

Expand Down Expand Up @@ -134,7 +134,7 @@ struct TopKFunctor<CPUDevice, T> {
return Status::OK();
}

auto SortIndices = [&, context](int start_batch, int limit_batch) {
auto SortIndices = [&](int start_batch, int limit_batch) {
for (int32 b = start_batch; b < limit_batch; ++b) {
const T* input_data = &input(b, 0);
const auto stable_comp = [input_data](const int32 a, const int32 b) {
Expand Down
1 change: 0 additions & 1 deletion tensorflow/core/profiler/rpc/client/dump_tpu_profile.cc
Expand Up @@ -46,7 +46,6 @@ using ::tensorflow::protobuf::util::MessageToJsonString;
using ::tensorflow::str_util::EndsWith;
using ::tensorflow::strings::StrCat;

constexpr char kGraphRunPrefix[] = "tpu_profiler.hlo_graph.";
constexpr char kJsonOpProfileFileName[] = "op_profile.json";
constexpr char kJsonTraceFileName[] = "trace.json.gz";
constexpr char kProfilePluginDirectory[] = "plugins/profile/";
Expand Down