Skip to content
This repository has been archived by the owner on Jan 10, 2023. It is now read-only.

Commit

Permalink
NFC: For TFPartition pass, enhanced debugging prints, by annotating S…
Browse files Browse the repository at this point in the history
…IL code

with marking enums, and also printing internal states like tensor start and end
points, and the result tensors.

This helps with understanding and debugging the TFPartition pass.

Example output:

---- ANALYSIS STATE FOR FUNCTION $S19partition_study_tmp26testPartition1StraightlineyyF ----------
Tensor start point:   %0 = float_literal $Builtin.FPIEEE32, 0x3F800000 // 1 // user: %1
Tensor end point:   strong_retain %9 : $TensorHandle<Float>         // id: %10
SIL with markings:

bb0:
[Move]    %0 = float_literal $Builtin.FPIEEE32, 0x3F800000 // 1 // user: %1
[Move]    %1 = builtin "__tfop_tfc.scalarToTensor,$in"(%0 : $Builtin.FPIEEE32) : $TensorHandle<Float> // users: %14, %13, %6, %9, %5, %3, %15, %12, %9, %8, %7, %4, %2
          strong_retain %1 : $TensorHandle<Float>         // id: %2
          strong_retain %1 : $TensorHandle<Float>         // id: %3
          strong_retain %1 : $TensorHandle<Float>         // id: %4
          strong_retain %1 : $TensorHandle<Float>         // id: %5
          strong_retain %1 : $TensorHandle<Float>         // id: %6
          strong_retain %1 : $TensorHandle<Float>         // id: %7
          strong_release %1 : $TensorHandle<Float>        // id: %8
[Move]    %9 = builtin "__tfop_Add,$in,$in"(%1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>) : $TensorHandle<Float> // users: %46, %41, %31, %33, %30, %29, %16, %11, %10
          strong_retain %9 : $TensorHandle<Float>         // id: %10
          strong_release %9 : $TensorHandle<Float>        // id: %11
          strong_release %1 : $TensorHandle<Float>        // id: %12
          strong_release %1 : $TensorHandle<Float>        // id: %13
          strong_release %1 : $TensorHandle<Float>        // id: %14
          strong_release %1 : $TensorHandle<Float>        // id: %15
          strong_retain %9 : $TensorHandle<Float>         // id: %16
          // function_ref implicit closure #1 in Tensor.array.getter
  %17 = function_ref @$S10TensorFlow0A0V5arrayAA11ShapedArrayVyxGvgSSyXKfu_ : $@convention(thin) () -> @owned String // user: %18
<omitted some code>
---- END OF ANALYSIS STATE FOR FUNCTION ----------

---- PARTITION STATE FOR FUNCTION $S19partition_study_tmp26testPartition1StraightlineyyF ----------
(Possibly updated) tensor end point:   strong_retain %9 : $TensorHandle<Float>         // id: %10
There are 1 result values:
  %9 = builtin "__tfop_Add,$in,$in"(%1 : $TensorHandle<Float>, %1 : $TensorHandle<Float>) : $TensorHandle<Float> // users: %46, %41, %31, %33, %30, %29, %16, %11, %10
---- END OF PARTITION STATE FOR FUNCTION ----------

PiperOrigin-RevId: 195049251
  • Loading branch information
mhong committed May 2, 2018
1 parent ad10efc commit 4e27ff2
Showing 1 changed file with 60 additions and 0 deletions.
60 changes: 60 additions & 0 deletions lib/SILOptimizer/Mandatory/TFPartition.cpp
Expand Up @@ -593,6 +593,12 @@ enum class Marking {
Delete, // This instruction is simply deleted (e.g. debug_value)
};

// Human-readable names for the `Marking` enum values, indexed by the enum's
// underlying integer value. NOTE: this table must be kept in sync with the
// `Marking` enum declaration above.
// Each string should be no more than 6 characters, so that a string like
// "[Delete]\t" can be aligned with "[Arg]\t" or just "\t".
static const char *const markingStr[]{
    "Copy", "Move", "Send", "Arg", "Delete",
};

class TFFunctionPartition {
public:
SILFunction &fn;
Expand Down Expand Up @@ -1685,6 +1691,10 @@ void TFFunctionPartition::markTensorBBArgumentsForDeletion() {
}
}

/// Returns the human-readable name for a `Marking` value, for use in the
/// debugging dumps produced by this pass (e.g. "[Move]\t" annotations).
static const char *markingEnumToStr(Marking m) {
  // Guard against reading out of bounds if the Marking enum grows without a
  // matching entry being added to markingStr.
  assert(static_cast<unsigned>(m) <
             sizeof(markingStr) / sizeof(markingStr[0]) &&
         "markingStr table is out of sync with the Marking enum");
  return markingStr[static_cast<int>(m)];
}

/// Scan the function looking for blocks with tensor operations in them. As
/// we find them, mark them as "to-be-partitioned", which marks (transitive)
/// data and control dependencies.
Expand Down Expand Up @@ -1841,6 +1851,40 @@ bool TFFunctionPartition::markFunction() {
}
assert(tensorEndPoint && "Failed to compute an end point");

if (auto *outs = getTFDumpIntermediateStream()) {
*outs << "\n---- ANALYSIS STATE FOR FUNCTION " << fn.getName()
<< " ----------\n";
*outs << "Tensor start point: ";
tensorStartPoint->print(*outs);
*outs << "Tensor end point: ";
tensorEndPoint->print(*outs);

*outs << "SIL with markings:\n";
for (auto &BB : fn.getBlocks()) {
SILPrintContext Ctx(*outs);
*outs << "\n" << Ctx.getID(&BB) << ":\n";
for (auto *arg : BB.getArguments()) {
if (markedBBArguments.count(arg)) {
auto it = markedBBArguments.find(arg);
assert (it != markedBBArguments.end());
*outs << "[" << markingEnumToStr(it->second.first) << "]";
}
*outs << "\t";
arg->print(*outs);
}
for (auto &I : BB) {
if (markedInstructions.count(&I)) {
*outs << "[" << markingEnumToStr(markedInstructions[&I]) << "]";
}
*outs << "\t";
I.print(*outs);
}
}

*outs << "---- END OF ANALYSIS STATE FOR FUNCTION ----------\n";
outs->flush();
}

return true;
}

Expand Down Expand Up @@ -2715,6 +2759,9 @@ bool PartitionCloner::finalizeOriginal() {
// Next, add sends back of any values that are used by the host code, and
// remove the original instruction.
for (auto inst : instructionsToRemove) {
// These insts cannot contain types like unconditional branch, which do not
// have getResults() defined.
assert(!isa<NonValueInstruction>(inst));
for (auto result : inst->getResults())
if (!handleHostReferencesOfMovedValue(result,
getUserSourceLocation(inst)))
Expand Down Expand Up @@ -3322,6 +3369,19 @@ auto TFFunctionPartition::partition() -> PartitionedTensorProgram {
}
}

if (auto *outs = getTFDumpIntermediateStream()) {
*outs << "\n---- PARTITION STATE FOR FUNCTION " << fn.getName()
<< " ----------\n";
*outs << "(Possibly updated) tensor end point: ";
tensorEndPoint->print(*outs);
*outs << "There are " << resultValues.size() << " result values:\n";
for (auto& resultValue : resultValues) {
resultValue->print(*outs);
}
*outs << "---- END OF PARTITION STATE FOR FUNCTION ----------\n\n";
outs->flush();
}

// Insert the start/finish and any terminate runtime calls.
// FIXME: Order resultValues based on the ordering of their defining
// instructions first, so that the generated tensor program has a
Expand Down

0 comments on commit 4e27ff2

Please sign in to comment.