/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/tf2xla/functionalize_cond.h"
#include <algorithm>
#include <deque>
#include <stack>
#include <unordered_set>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/tf2xla/frontend_attributes_util.h"
#include "tensorflow/compiler/tf2xla/functionalize_control_flow_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/union_find.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace functionalize_cond {
bool AncestorNode::operator<(const AncestorNode& other) const {
return (output_tensor.node->id() < other.output_tensor.node->id()) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index < other.output_tensor.index) ||
(output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index &&
type < other.type);
}
bool AncestorNode::operator==(const AncestorNode& other) const {
return output_tensor.node->id() == other.output_tensor.node->id() &&
output_tensor.index == other.output_tensor.index && type == other.type;
}
size_t AncestorNode::Hash::operator()(const AncestorNode& ancestor) const {
size_t h = std::hash<int>()(ancestor.output_tensor.node->id());
h = Hash64Combine(h, std::hash<int>()(ancestor.output_tensor.index));
return Hash64Combine(h, std::hash<int>()(static_cast<int>(ancestor.type)));
}
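// Key used to cluster merge nodes: merges that share the same cond state,
// ancestor state, and switch predicate are grouped into one conditional.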
typedef std::tuple<StateMap::CondId, StateMap::AncestorId, OutputTensor>
ClusterTuple;
struct ClusterTupleLessThan {
bool operator()(const ClusterTuple& a, const ClusterTuple& b) const {
if (std::tie(std::get<0>(a), std::get<1>(a)) <
std::tie(std::get<0>(b), std::get<1>(b))) {
return true;
} else if (std::tie(std::get<0>(a), std::get<1>(a)) ==
std::tie(std::get<0>(b), std::get<1>(b))) {
return StateMap::OutputTensorLess()(std::get<2>(a), std::get<2>(b));
} else {
return false;
}
}
};
// TODO(jpienaar): Move to OutputTensor.
string DebugString(const OutputTensor& tensor) {
return absl::StrCat(tensor.node->name(), ":", tensor.index);
}
string Branch_Name(BranchType b) {
switch (b) {
case BranchType::kElseBranch:
return "else";
case BranchType::kThenBranch:
return "then";
case BranchType::kBoth:
return "both";
case BranchType::kNeither:
return "neither";
}
}
string DebugString(StateMap::CondId cond_state) {
if (cond_state == nullptr || cond_state->empty()) return "{}";
using value_type = StateMap::CondState::value_type;
return absl::StrCat(
"{",
absl::StrJoin(*cond_state, ", ",
[](string* output, const value_type& pred_branch) {
const OutputTensor& pred = pred_branch.first;
const BranchType& branch = pred_branch.second;
if (branch == BranchType::kNeither)
absl::StrAppend(output, "d");
else
absl::StrAppend(output, "s(", DebugString(pred), ",",
Branch_Name(branch), ")");
}),
"}");
}
// Returns the predicate of a switch.
Status GetSwitchPredicate(const Node& switch_node, OutputTensor* pred) {
const Edge* pred_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(1, &pred_edge));
  // The predicate can be preceded by an identity node. Look through
  // identity nodes to find the predicate.
while (pred_edge->src()->IsIdentity()) {
TF_RETURN_IF_ERROR(pred_edge->src()->input_edge(0, &pred_edge));
}
*pred = OutputTensor(pred_edge->src(), pred_edge->src_output());
return OkStatus();
}
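// Returns the value (data) input of a switch.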
Status GetSwitchValue(const Node& switch_node, OutputTensor* val) {
const Edge* val_edge;
TF_RETURN_IF_ERROR(switch_node.input_edge(0, &val_edge));
*val = OutputTensor(val_edge->src(), val_edge->src_output());
return OkStatus();
}
bool StateMap::OutputTensorLess::operator()(const OutputTensor& lhs,
const OutputTensor& rhs) const {
return (lhs.node->id() < rhs.node->id()) ||
(lhs.node->id() == rhs.node->id() && lhs.index < rhs.index);
}
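// Orders CondState entries by predicate tensor first, then by branch.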
struct CondStateLess {
bool operator()(const StateMap::CondState::value_type& lhs,
const StateMap::CondState::value_type& rhs) const {
if (StateMap::OutputTensorLess().operator()(lhs.first, rhs.first))
return true;
if (lhs.first.node->id() == rhs.first.node->id() &&
lhs.first.index == rhs.first.index)
return lhs.second < rhs.second;
return false;
}
};
StateMap::StateMap(Graph* graph) {
node_to_condid_map_.resize(graph->num_node_ids());
node_to_ancestorid_map_.resize(graph->num_node_ids());
// Initialize the dead state (empty state is designated with a nullptr).
dead_id_ = GetCondId(
{std::make_pair(OutputTensor(nullptr, -1), BranchType::kNeither)});
}
bool StateMap::IsDead(StateMap::CondId id) const { return id == dead_id_; }
bool StateMap::IsEmpty(StateMap::CondId id) const { return id == nullptr; }
size_t StateMap::Hash::operator()(const StateMap::CondState& map) const {
if (map.empty()) return 0;
// Compute hash of the front element.
auto it = map.begin();
size_t h = Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second));
for (++it; it != map.end(); ++it) {
    // Combine the hash with the remaining elements in the map.
h = Hash64Combine(h, Hash64Combine(OutputTensor::Hash()(it->first),
hash<BranchType>()(it->second)));
}
return h;
}
size_t StateMap::Hash::operator()(const StateMap::AncestorState& map) const {
if (map.empty()) return 0;
// Compute hash of the front element.
auto it = map.begin();
size_t h = AncestorNode::Hash()(*it);
for (++it; it != map.end(); ++it) {
    // Combine the hash with the remaining elements in the map.
h = Hash64Combine(h, AncestorNode::Hash()(*it));
}
return h;
}
// CondArgNode represents an input to the conditional and its corresponding
// switch nodes.
struct CondArgNode {
explicit CondArgNode(Node* src, int src_output)
: src(src), src_output(src_output) {}
string ToString() const {
return absl::StrCat("src=", src->name(), ":", src_output,
" switches=", NodesToString(switches));
}
Node* src;
int src_output;
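  // Copies of the argument node created for this input in each branch body,
  // indexed by BranchType.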
std::array<Node*, 2> branch_copy;
std::vector<Node*> switches;
};
using CondArgNodes = std::vector<CondArgNode>;
string DebugString(const CondArgNodes& nodes) {
return absl::StrCat(
"[",
absl::StrJoin(nodes, ", ",
[](string* output, const CondArgNode& node) {
absl::StrAppend(output, node.ToString());
}),
"]");
}
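// Looks up the cond state of a node. Nodes that existed when the map was
// constructed are stored in a dense vector indexed by node id; nodes added
// later fall back to an overflow map.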
StateMap::CondId StateMap::LookupCondId(const Node* node) const {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size) return node_to_condid_map_[node->id()];
return added_node_condid_mapping_.at(node->id());
}
StateMap::CondId StateMap::GetCondId(const StateMap::CondState& state) {
if (state.empty()) return nullptr;
return &*condstate_set_.insert(state).first;
}
void StateMap::ResetCondId(const Node* node, StateMap::CondId id) {
const int64_t map_size = node_to_condid_map_.size();
if (node->id() < map_size)
node_to_condid_map_[node->id()] = id;
else
added_node_condid_mapping_[node->id()] = id;
}
StateMap::AncestorId StateMap::LookupAncestorId(const Node* node) const {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size) return node_to_ancestorid_map_[node->id()];
return added_node_ancestorid_mapping_.at(node->id());
}
StateMap::AncestorId StateMap::GetAncestorId(
const StateMap::AncestorState& state) {
if (state.empty()) return nullptr;
return &*ancestorstate_set_.insert(state).first;
}
void StateMap::ResetAncestorId(const Node* node, StateMap::AncestorId id) {
const int64_t map_size = node_to_ancestorid_map_.size();
if (node->id() < map_size)
node_to_ancestorid_map_[node->id()] = id;
else
added_node_ancestorid_mapping_[node->id()] = id;
}
void StateMap::MarkDead(const Node* node) { ResetCondId(node, dead_id_); }
string StateMap::CondStateToString(const Node* node) const {
return CondStateToString(LookupCondId(node));
}
string StateMap::CondStateToString(StateMap::CondId id) const {
return DebugString(id);
}
string StateMap::AncestorStateToString(const Node* node) const {
if (auto id = LookupAncestorId(node)) {
return absl::StrCat(
"{",
absl::StrJoin(*id, ",",
[](string* output, const AncestorNode& ancestor) {
absl::StrAppend(output,
ancestor.output_tensor.node->name(),
":", ancestor.output_tensor.index);
}),
"}");
}
return "{}";
}
FunctionalizeCond::FunctionalizeCond(Graph* graph,
FunctionLibraryDefinition* library,
const NodeFilter& node_filter)
: state_map_(graph),
library_(library),
graph_(graph),
node_filter_(node_filter) {}
// Class representing the merge/switch nodes that will become a conditional.
class Conditional {
public:
Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner);
// Adds merge node that is part of this conditional.
Status AddMerge(Node* m);
// Constructs an If node from the merge nodes.
Status BuildAndReplace(
Graph* graph, FunctionLibraryDefinition* library,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
private:
  // Extracts the then/else bodies: creates new graphs containing copies of
  // the nodes in the then/else branches of this conditional, to be used as
  // function bodies.
Status ExtractBodies(Graph* graph);
// Builds the arguments that are the input to the If.
Status BuildArgumentNodes();
// Builds the If node for the extracted bodies with the given predicate.
Status BuildIfNode(Graph* graph, FunctionLibraryDefinition* library);
// Adds input edges to If node.
Status AddInputEdges(
Graph* graph,
const std::unordered_map<Node*, OutputTensor>& merge_to_replacement);
// Adds output edges from If node.
// Record new output tensor for all Merge nodes in 'merge_to_replacement'.
Status AddOutputEdges(
Graph* graph,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement);
// Adds switch node that is part of this conditional.
Status AddSwitch(Node* s);
  // Adds a switch node along the edge and rewires the edge to go via the
  // switch.
Status AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph);
// Internal name of conditional. The name is based on the first merge node
// added.
string name() const;
// The FunctionalizeCond instance that created this.
FunctionalizeCond* parent_;
// Mapping between nodes and their cond state.
StateMap* state_map_;
// The predicate of the conditional.
OutputTensor predicate_;
// Shape refiner of ops in the graph.
const ShapeRefiner& refiner_;
  // The predicate of the switches of the conditional. This may differ from
  // predicate_ (which is initialized from the original graph), as the
  // predicate could be the output of a newly created If node.
OutputTensor switch_predicate_;
// Switch nodes in graph that are part of this conditional.
std::set<Node*, NodeCmpByNameResourcesLast> switches_;
// Merge nodes in graph that are part of this conditional.
std::set<Node*, NodeCmpByNameResourcesLast> merges_;
// Vector of control inputs from outside the conditional to a node inside.
std::vector<Node*> external_control_inputs_;
std::vector<Node*> external_control_outputs_;
// Graphs corresponding to the then and else branch.
std::array<std::unique_ptr<Graph>, 2> bodies_;
  // Maps node ids in graph_ to the corresponding copied nodes in the branch
  // bodies.
std::array<std::vector<Node*>, 2> node_maps_;
// The argument nodes created for the switches.
CondArgNodes cond_arg_nodes_;
// The constructed If node.
Node* if_node_ = nullptr;
// Whether the merge nodes of this conditional have been replaced.
bool replaced_ = false;
};
Conditional::Conditional(OutputTensor predicate, FunctionalizeCond* parent,
StateMap* cond_state_map, const ShapeRefiner& refiner)
: parent_(parent),
state_map_(cond_state_map),
predicate_(predicate),
refiner_(refiner) {}
Status Conditional::AddMerge(Node* m) {
merges_.insert(m);
return OkStatus();
}
Status Conditional::AddSwitch(Node* s) {
VLOG(5) << "Adding switch " << s->DebugString();
OutputTensor predicate;
TF_RETURN_IF_ERROR(GetSwitchPredicate(*s, &predicate));
if (switch_predicate_.node == nullptr) switch_predicate_ = predicate;
if (!(switch_predicate_ == predicate)) {
return errors::InvalidArgument(
"Merge nodes ", NodesToString(merges_),
" directly dominated by switch nodes with different predicates (",
DebugString(switch_predicate_), " vs ", DebugString(predicate), ").");
}
switches_.insert(s);
parent_->AddSwitchId(s->id());
return OkStatus();
}
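// Builds the branch bodies' argument nodes: one _Arg node per distinct
// switch input tensor, wired up to the copied consumers of the
// corresponding switch outputs in each branch.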
Status Conditional::BuildArgumentNodes() {
VLOG(1) << "Build function arguments";
struct Hash {
size_t operator()(const std::pair<Node*, int>& item) const {
return Hash64Combine(hash<Node*>()(item.first),
std::hash<int>()(item.second));
}
};
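  // Maps from a switch's (data input node, output index) to the index of the
  // CondArgNode created for that tensor, so that switches fed by the same
  // tensor share a single argument.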
std::unordered_map<std::pair<Node*, int>, int, Hash> input_index;
for (Node* switch_node : switches_) {
const Edge* e;
TF_RETURN_IF_ERROR(switch_node->input_edge(0, &e));
std::pair<Node*, int> key = std::make_pair(e->src(), e->src_output());
if (input_index.find(key) == input_index.end()) {
input_index[key] = cond_arg_nodes_.size();
cond_arg_nodes_.emplace_back(key.first, key.second);
}
cond_arg_nodes_.at(input_index.at(key)).switches.push_back(switch_node);
}
VLOG(5) << "CondArg nodes created: " << DebugString(cond_arg_nodes_);
int arg_count = 0;
for (CondArgNode& cond_arg_node : cond_arg_nodes_) {
DataType dtype = cond_arg_node.src->output_type(cond_arg_node.src_output);
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
TF_RETURN_IF_ERROR(
NodeBuilder(absl::StrCat("_Arg", arg_count),
FunctionLibraryDefinition::kArgOp)
.Attr("T", dtype)
.Attr("index", arg_count)
.Finalize(bodies_[branch_index].get(),
&cond_arg_node.branch_copy[branch_index]));
}
for (Node* node : cond_arg_node.switches) {
for (const Edge* e : node->out_edges()) {
if (e->IsControlEdge()) continue;
int branch_index = e->src_output();
Node* src_copy = cond_arg_node.branch_copy[branch_index];
Node* dst_copy = node_maps_[branch_index][e->dst()->id()];
        // The graph may contain dead switch nodes; skip outputs whose
        // consumer was not copied into the branch body.
        if (dst_copy == nullptr) continue;
// If the input goes directly to a merge then the merge has
// been replaced by a retval so the dst input is 0 instead of
// dst_input.
int dst_input = IsMerge(e->dst()) ? 0 : e->dst_input();
bodies_[branch_index]->AddEdge(src_copy, 0, dst_copy, dst_input);
}
}
++arg_count;
}
// Verify that all retvals have an input.
// TODO(jpienaar): One could add a ZerosLike in the branch that doesn't have
// input.
for (Node* m : merges_) {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bool has_input = false;
for (auto e : node_maps_[static_cast<int>(branch)][m->id()]->in_edges()) {
if (!e->IsControlEdge()) {
has_input = true;
break;
}
}
if (!has_input) {
return errors::Internal(
"Failed to functionalize control flow with merge ",
FormatNodeForError(*m), " that doesn't have input on ",
Branch_Name(branch), " branch.");
}
}
}
return OkStatus();
}
Status Conditional::AddSwitchNodeAlongEdge(const Edge* edge, BranchType branch,
Graph* graph) {
// Previously we had edge:
// src:src_output ---- edge ----> dst:dst_input
// post this we have (in graph)
// src:src_output --> switch<pred> --- new_edge --> dst:dst_input
// TODO(jpienaar): One could keep a map caching the extra switch nodes added
// to avoid adding another switch to feed a value for which a switch was
// already added.
Node* switch_node;
Node* src = edge->src();
int src_output = edge->src_output();
TF_RETURN_IF_ERROR(
NodeBuilder(graph->NewName(absl::StrCat(src->name(), "_added_switch")),
"Switch")
.Input(src, src_output)
.Input(const_cast<Node*>(predicate_.node), predicate_.index)
.Finalize(graph, &switch_node));
state_map_->ResetCondId(switch_node, state_map_->LookupCondId(src));
state_map_->ResetAncestorId(switch_node, state_map_->LookupAncestorId(src));
Node* dst = edge->dst();
int dst_input = edge->dst_input();
graph->RemoveEdge(edge);
graph->AddEdge(switch_node, static_cast<int>(branch), dst, dst_input);
return AddSwitch(switch_node);
}
Status Conditional::ExtractBodies(Graph* graph) {
VLOG(2) << "Extracting bodies for " << name();
for (auto b : {BranchType::kElseBranch, BranchType::kThenBranch}) {
bodies_[static_cast<int>(b)] =
std::make_unique<Graph>(graph->op_registry());
}
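  // Returns the branch that an edge's source feeds: switches encode the
  // branch in the source output index; other nodes are resolved from their
  // cond state for this conditional's predicate.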
auto find_branch = [&](const Edge* e) {
const auto& id = state_map_->LookupCondId(e->src());
return IsSwitch(e->src()) ? BranchType(e->src_output())
: state_map_->FindBranchOf(id, predicate_);
};
std::array<std::vector<Node*>, 2> stacks;
VLOG(5) << "Merges: " << NodesToString(merges_);
for (Node* m : merges_) {
VLOG(5) << "For merge: " << m->DebugString() << " "
<< state_map_->CondStateToString(m);
for (auto e : m->in_edges()) {
if (e->IsControlEdge()) continue;
BranchType branch = find_branch(e);
TF_RET_CHECK(branch == BranchType::kThenBranch ||
branch == BranchType::kElseBranch)
<< "Error: " << e->src()->name()
<< " is not on either then or else branch (" << Branch_Name(branch)
<< ") for predicate " << DebugString(predicate_) << " ["
<< DebugString(state_map_->LookupCondId(e->src())) << "].";
Node* src = e->src();
if (IsSwitch(src)) {
// Switch node outputs and dependencies are handled separately.
TF_RETURN_IF_ERROR(AddSwitch(src));
} else {
stacks[static_cast<int>(branch)].push_back(src);
}
}
}
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto output = bodies_[branch_index].get();
auto& stack = stacks[branch_index];
VLOG(5) << "In branch: " << Branch_Name(branch) << " "
<< NodesToString(stack);
std::vector<bool> visited(graph->num_node_ids(), false);
node_maps_[branch_index].resize(graph->num_node_ids(), nullptr);
auto& node_map = node_maps_[branch_index];
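    // Walk depth-first from the merge inputs back through the branch,
    // copying each visited node into the branch body graph.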
while (!stack.empty()) {
Node* n = stack.back();
stack.pop_back();
if (visited.at(n->id())) continue;
visited[n->id()] = true;
// Verify output edges and record control edges exiting scope.
for (const Edge* e : n->out_edges()) {
Node* dst = e->dst();
if (IsMerge(dst)) continue;
Node* src = e->src();
auto dst_id = state_map_->LookupCondId(dst);
auto src_id = state_map_->LookupCondId(src);
if (dst_id != src_id) {
if (e->IsControlEdge()) {
external_control_outputs_.push_back(e->src());
} else {
            // Constants are treated specially to work around the case of
            // non-dominated constant nodes.
if (!IsConstant(src)) {
// TODO(b/78882471): A node that feeds into two different
// CondState is not necessarily an error so log a warning for now
// but revisit to improve the testing to enable making this an
// error.
LOG(WARNING) << errors::InvalidArgument(
"Graph contains node ", FormatNodeForError(*src),
" that feeds into node ", FormatNodeForError(*dst),
" but these nodes are in different control contexts (",
DebugString(src_id), " vs ", DebugString(dst_id),
" (detected during out edge testing)");
}
}
}
}
      // Copy incoming edges to the dst node. Iterate over a copy of the
      // edges, as they could be mutated during iteration.
std::vector<const Edge*> in_edges(n->in_edges().begin(),
n->in_edges().end());
// Sort in_edges to make sure nodes are copied in a deterministic order.
std::sort(
in_edges.begin(), in_edges.end(), [](const Edge* a, const Edge* b) {
int a_src_output = a->src_output(), b_src_output = b->src_output();
StringPiece a_name(a->src()->name()), b_name(b->src()->name());
return std::tie(a_src_output, a_name) <
std::tie(b_src_output, b_name);
});
for (const Edge* e : in_edges) {
Node* src = e->src();
        // Skip non-op nodes (e.g. the graph's source node).
if (!src->IsOp()) continue;
Node* dst = e->dst();
if (IsSwitch(src)) {
// Switch node outputs and dependencies are handled separately.
TF_RETURN_IF_ERROR(AddSwitch(src));
continue;
}
// Verify input is from the same context.
auto src_id = state_map_->LookupCondId(src);
auto dst_id = state_map_->LookupCondId(dst);
if (IsMerge(dst) || src_id == dst_id) {
// TODO(jpienaar): The merge case can be more strict.
if (node_map.at(src->id()) == nullptr) {
node_map.at(src->id()) = output->CopyNode(src);
stack.push_back(src);
}
} else if (e->IsControlEdge()) {
// Here we have a control flow edge between src and dst that are not
// in the same context. This is an external control dependency except
// for one case: where the only difference between CondId of e->src()
// and CondId of e->dst() is that e->src() has {PRED, kNeither} and
// e->dst() has {PRED, kThenBranch/kElseBranch}. This happens in
// gradients code for tf.cond(), where e->src() is a control pivot
// node for a branch and e->dst() is a data node in that branch.
bool is_external_control_input = true;
if (!state_map_->IsEmpty(src_id) && !state_map_->IsEmpty(dst_id)) {
std::vector<StateMap::CondState::value_type> diff;
std::set_symmetric_difference(
src_id->begin(), src_id->end(), dst_id->begin(), dst_id->end(),
std::back_inserter(diff), CondStateLess());
if (diff.size() == 2 && diff[0].first == diff[1].first &&
(diff[0].second == BranchType::kNeither ||
diff[1].second == BranchType::kNeither)) {
auto src_branch = src_id->find(diff[0].first);
if (src_branch != src_id->end() &&
src_branch->second == BranchType::kNeither) {
is_external_control_input = false;
}
}
}
if (is_external_control_input) {
external_control_inputs_.push_back(src);
}
} else {
          // This shouldn't happen: it means we have an external data input
          // not entering via a switch node. Work around this as follows:
          // * for constant nodes, copy them;
          // * for non-constant nodes, insert a switch along the edge.
if (IsConstant(src)) {
// Check if constant node was added already. It is possible to have
// multiple uses of a constant node.
if (node_map.at(src->id()) == nullptr) {
node_map.at(src->id()) = output->CopyNode(src);
}
} else {
StateMap::CondState state = *dst_id;
state.erase(predicate_);
if (state_map_->GetCondId(state) == src_id) {
TF_RETURN_IF_ERROR(AddSwitchNodeAlongEdge(e, branch, graph));
continue;
} else {
return errors::InvalidArgument(
"Graph contains node ", FormatNodeForError(*src),
" that feeds into node ", FormatNodeForError(*dst),
" but these nodes are in different control contexts (",
DebugString(src_id), " vs ", DebugString(dst_id),
" (detected during in edge testing)");
}
}
}
Node* src_copy = node_map.at(e->src()->id());
int src_output = e->src_output();
if (node_map.at(dst->id()) == nullptr) {
node_map.at(dst->id()) = output->CopyNode(dst);
}
Node* dst_copy = node_map.at(e->dst()->id());
if (e->IsControlEdge()) {
// Skip control inputs from external context.
if (src_copy != nullptr) output->AddControlEdge(src_copy, dst_copy);
} else {
output->AddEdge(src_copy, src_output, dst_copy, e->dst_input());
}
}
}
}
// Build return values from the merge nodes.
int index = 0;
for (Node* m : merges_) {
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto& node_map = node_maps_[branch_index];
auto output = bodies_[branch_index].get();
TF_ASSIGN_OR_RETURN(node_map[m->id()],
BuildRetvalNode(output, m->output_type(0), index));
}
++index;
    // Connect the inputs of the merge to the retval, except when the input
    // is a Switch node, which is handled separately.
for (auto e : m->in_edges()) {
if (e->IsControlEdge()) continue;
int branch_index = static_cast<int>(find_branch(e));
auto& node_map = node_maps_[branch_index];
auto output = bodies_[branch_index].get();
Node* in = e->src();
if (!IsSwitch(in)) {
if (node_map.at(in->id()) == nullptr) {
node_map[in->id()] = output->CopyNode(in);
}
output->AddEdge(node_map[in->id()], e->src_output(),
node_map.at(m->id()), 0);
}
}
}
return OkStatus();
}
Status Conditional::BuildIfNode(Graph* graph,
FunctionLibraryDefinition* library) {
VLOG(2) << "Build cond function for " << name();
NodeDebugInfo debug_info((*merges_.begin())->def());
NodeDefBuilder builder(name(), "If", library, &debug_info);
const string branch_name[] = {"else_branch", "then_branch"};
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
NameAttrList body_name;
body_name.set_name(library->UniqueFunctionName(
absl::StrCat("_functionalize_if_", branch_name[branch_index], "_")));
VLOG(3) << "FunctionalizeControlFlow (" << branch_name[branch_index]
<< "): "
<< DumpGraphToFile(
"functionalize_cond_body_" + branch_name[branch_index],
*bodies_[branch_index], nullptr);
FunctionDef body_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*bodies_[branch_index],
body_name.name(), &body_fdef));
TF_RETURN_IF_ERROR(library->AddFunctionDef(body_fdef));
builder.Attr(branch_name[branch_index], body_name);
}
VLOG(3) << "Build input type";
std::vector<NodeDefBuilder::NodeOut> inputs;
DataTypeVector in_arg_types;
for (auto& kv : cond_arg_nodes_) {
bool inserted = false;
for (const Node* arg : kv.switches) {
const Edge* in_edge;
TF_RETURN_IF_ERROR(arg->input_edge(0, &in_edge));
if (in_edge->IsControlEdge()) {
builder.ControlInput(in_edge->src()->name());
} else {
if (!inserted) {
DataType dtype = arg->input_type(0);
inputs.emplace_back(NodeDefBuilder::NodeOut(
in_edge->src()->name(), in_edge->src_output(), dtype));
in_arg_types.push_back(dtype);
inserted = true;
}
}
}
}
builder.Attr("Tin", in_arg_types);
DataTypeVector out_type;
std::vector<PartialTensorShape> output_shapes;
output_shapes.reserve(merges_.size());
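  // Collect the output type of every merge node and, when the shape refiner
  // has a context for the merge, a best-effort output shape (left unknown
  // otherwise).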
for (const Node* merge : merges_) {
DataType dtype = merge->output_type(0);
TensorShapeProto shape;
if (auto* shape_ctx = refiner_.GetContext(merge)) {
shape_inference::ShapeHandle handle;
shape_ctx->ShapeHandleToProto(shape_ctx->output(0), &shape);
}
out_type.push_back(dtype);
output_shapes.push_back(shape);
}
builder.Attr("Tout", out_type);
VLOG(3) << "Build output type: " << DataTypeVectorString(out_type);
builder.Attr("output_shapes", output_shapes);
VLOG(3) << "Build output shapes: "
<< PartialTensorShapeUtils::PartialShapeListString(output_shapes);
builder.Attr("Tcond", DT_BOOL);
// Add some internal attributes which need to be propagated.
for (absl::string_view attr_name : kAttrsToPropagate) {
string attr_val;
if (GetNodeAttr(predicate_.node->def(), attr_name, &attr_val).ok()) {
builder.Attr(attr_name, attr_val);
}
}
builder.Device(predicate_.node->assigned_device_name());
// Conditional should be the first input ...
builder.Input(
NodeDefBuilder::NodeOut(predicate_.node->name(), predicate_.index,
predicate_.node->output_type(predicate_.index)));
// ... followed by the other inputs.
builder.Input(inputs);
VLOG(3) << "Build If node";
NodeDef if_def;
TF_RETURN_IF_ERROR(builder.Finalize(&if_def));
TF_ASSIGN_OR_RETURN(if_node_,
parent_->AddIfNode(if_def, *merges_.begin(), predicate_));
return OkStatus();
}
Status Conditional::AddInputEdges(
Graph* graph,
const std::unordered_map<Node*, OutputTensor>& merge_to_replacement) {
VLOG(2) << "AddInputEdges for " << if_node_->name();
int index = 0;
// Add predicate input.
if (predicate_.node->IsMerge()) {
// If the predicate is a Merge node, we should not use Merge output as
// predicate. Instead, we should use the corresponding If output in
// 'merge_to_replacement'. Otherwise, this Conditional's If node is still
// connected to the predicate Merge node; and when we call
// DeleteReachableAndDeadNodes(), the predicate Merge node and this
// Conditional's If node will be removed.
auto iter = merge_to_replacement.find(predicate_.node);
if (iter == merge_to_replacement.end()) {
return errors::Internal("Cannot find replacement for Merge node ",
predicate_.node->name());
}
graph->AddEdge(iter->second.node, iter->second.index, if_node_, index++);
} else {
graph->AddEdge(const_cast<Node*>(predicate_.node), predicate_.index,
if_node_, index++);
}
// Add function body inputs.
for (auto& arg : cond_arg_nodes_) {
if (arg.src_output == Graph::kControlSlot) {
graph->AddControlEdge(arg.src, if_node_);
} else {
graph->AddEdge(arg.src, arg.src_output, if_node_, index++);
}
}
for (Node* n : external_control_inputs_) {
graph->AddControlEdge(n, if_node_);
}
return OkStatus();
}
Status Conditional::AddOutputEdges(
Graph* graph,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement) {
VLOG(2) << "AddOutputEdges for " << if_node_->name();
int i = 0;
for (Node* node : merges_) {
TF_RETURN_IF_ERROR(parent_->AddIdentityNode(node, if_node_, i));
std::vector<const Edge*> edges(node->out_edges().begin(),
node->out_edges().end());
for (const Edge* edge : edges) {
Node* dst = edge->dst();
int dst_input = edge->dst_input();
if (edge->src_output() > 0) {
return errors::Unimplemented("Output of index (", edge->src_output(),
") of merge node ",
FormatNodeForError(*node));
}
bool control_edge = edge->IsControlEdge();
graph->RemoveEdge(edge);
if (control_edge) {
graph->AddControlEdge(if_node_, dst);
} else {
graph->AddEdge(if_node_, i, dst, dst_input);
}
}
// Record corresponding output tensor in 'merge_to_replacement'.
(*merge_to_replacement)[node] = OutputTensor{if_node_, i};
++i;
}
for (Node* n : external_control_outputs_) {
graph->AddControlEdge(if_node_, n);
}
return OkStatus();
}
Status Conditional::BuildAndReplace(
Graph* graph, FunctionLibraryDefinition* library,
std::unordered_map<Node*, OutputTensor>* merge_to_replacement) {
VLOG(1) << "Build If and replace merge nodes "
<< NodesToString(this->merges_);
if (replaced_) return OkStatus();
TF_RETURN_IF_ERROR(ExtractBodies(graph));
TF_RETURN_IF_ERROR(BuildArgumentNodes());
if (VLOG_IS_ON(3)) {
LOG(INFO) << "Extracted bodies:";
for (auto branch : {BranchType::kElseBranch, BranchType::kThenBranch}) {
int branch_index = static_cast<int>(branch);
auto output = bodies_[branch_index].get();
LOG(INFO) << Branch_Name(branch) << ": "
<< DebugString(output->ToGraphDefDebug());
}
}
TF_RETURN_IF_ERROR(BuildIfNode(graph, library));
TF_RETURN_IF_ERROR(AddInputEdges(graph, *merge_to_replacement));
TF_RETURN_IF_ERROR(AddOutputEdges(graph, merge_to_replacement));
TF_RETURN_IF_ERROR(parent_->PropagateUpdatedState(if_node_));
// Check that the if_node doesn't feed into itself.
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CheckNodeNotInCycle(if_node_, graph->num_node_ids()),
"Converting to If failed.");
replaced_ = true;
return OkStatus();
}
string Conditional::name() const {
CHECK(!merges_.empty());
return absl::StrCat((*merges_.begin())->name(), "_if");
}
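// Adds an Identity node named after 'replacee' that reads output 'port' of
// the If node, propagating the outside compilation attribute if one is set.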
Status FunctionalizeCond::AddIdentityNode(const Node* replacee, Node* if_node,
int port) {
NodeBuilder id_builder(replacee->name(), "Identity");
id_builder.Input(if_node, port);
string outside_compilation;
if (GetNodeAttr(if_node->def(), kXlaOutsideCompilationAttr,
&outside_compilation)
.ok()) {
id_builder.Attr(kXlaOutsideCompilationAttr, outside_compilation);
}
Node* id;
TF_RETURN_IF_ERROR(id_builder.Finalize(graph_, &id));
state_map_.ResetCondId(id, state_map_.LookupCondId(if_node));
state_map_.ResetAncestorId(id, state_map_.LookupAncestorId(if_node));
return OkStatus();
}
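// Adds an If node built from 'def' to the graph. The new node's cond state
// is that of 'replacee' with the functionalized predicate erased, and its
// ancestor state is inherited from 'replacee'.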
StatusOr<Node*> FunctionalizeCond::AddIfNode(const NodeDef& def,
const Node* replacee,
const OutputTensor& predicate) {
TF_ASSIGN_OR_RETURN(Node * ret, graph_->AddNode(def));
VLOG(1) << "Adding If for " << replacee->name();
StateMap::CondId id = state_map_.LookupCondId(replacee);
if (id) {
StateMap::CondState state = *id;
state.erase(predicate);
state_map_.ResetCondId(ret, state_map_.GetCondId(state));
} else {
state_map_.ResetCondId(ret, nullptr);
}
state_map_.ResetAncestorId(ret, state_map_.LookupAncestorId(replacee));
return ret;
}
Status FunctionalizeCond::PropagateUpdatedState(const Node* replacee) {
VLOG(2) << "Propagating update state for " << replacee->name() << " "
<< state_map_.CondStateToString(replacee);
// Redo topological sort as the order could have changed.
// TODO(jpienaar): The original topological order could also be updated
// dynamically if needed.
std::vector<Node*> rev_topo_order;
GetPostOrder(*graph_, &rev_topo_order, NodeComparatorID());
// All the outputs of the new node could potentially be updated.