1 change: 1 addition & 0 deletions aten/src/ATen/core/interned_strings.h
@@ -75,6 +75,7 @@ namespace c10 {
 _(aten, __round_to_zero_floordiv)\
 _(prim, fork) \
 _(prim, RaiseException) \
+_(prim, Function) \
 _(aten, append) \
 _(aten, format) \
 _(aten, __not__) \
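Interning prim::Function here gives later passes a stable symbol to dispatch on for closure nodes. As a hedged sketch of where such a node would come from (hypothetical usage; the exact surface syntax depends on the closure support this symbol backs):

# Hedged sketch: on a build with this patch, scripting a function that
# defines a nested function is expected to lower the inner def to a
# prim::Function node whose body hangs off a Subgraph attribute.
import torch

@torch.jit.script
def outer(x):
    def inner(y):    # expected to become a prim::Function node
        return y + x
    return x

print(outer.graph)   # the dumped IR should mention prim::Function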
18 changes: 9 additions & 9 deletions test/expect/TestFuser.test_lstm_cuda-backward.expect
@@ -27,26 +27,26 @@ graph(%0 : Float(*, *)
       %26 : Float(*, *)) {
   %27 : Float(*, *) = aten::mul(%0, %26)
   %28 : int[] = aten::size(%outgate)
-  %29 : Tensor = aten::sum_to_size(%27, %28)
+  %grad_self.1 : Tensor = aten::sum_to_size(%27, %28)
   %30 : Float(*, *) = aten::mul(%0, %outgate)
   %31 : int[] = aten::size(%26)
-  %32 : Tensor = aten::sum_to_size(%30, %31)
-  %33 : Tensor = prim::FusionGroup_0(%1, %32, %26)
+  %grad_other.1 : Tensor = aten::sum_to_size(%30, %31)
+  %33 : Tensor = prim::FusionGroup_0(%1, %grad_other.1, %26)
   %34 : Tensor = prim::SumToSize(%33, %24)
   %35 : Tensor = prim::SumToSize(%33, %25)
   %36 : Tensor = aten::mul(%35, %cellgate)
   %37 : int[] = aten::size(%ingate)
-  %38 : Tensor = aten::sum_to_size(%36, %37)
+  %grad_self.3 : Tensor = aten::sum_to_size(%36, %37)
   %39 : Tensor = aten::mul(%35, %ingate)
   %40 : int[] = aten::size(%cellgate)
-  %41 : Tensor = aten::sum_to_size(%39, %40)
+  %grad_other.3 : Tensor = aten::sum_to_size(%39, %40)
   %42 : Tensor = aten::mul(%34, %9)
   %43 : int[] = aten::size(%forgetgate)
-  %44 : Tensor = aten::sum_to_size(%42, %43)
+  %grad_self.5 : Tensor = aten::sum_to_size(%42, %43)
   %45 : Tensor = aten::mul(%34, %forgetgate)
   %46 : int[] = aten::size(%9)
-  %47 : Tensor = aten::sum_to_size(%45, %46)
-  %48 : Tensor = prim::FusionGroup_1(%38, %ingate, %44, %forgetgate, %41, %cellgate, %29, %outgate)
+  %grad_other.5 : Tensor = aten::sum_to_size(%45, %46)
+  %48 : Tensor = prim::FusionGroup_1(%grad_self.3, %ingate, %grad_self.5, %forgetgate, %grad_other.3, %cellgate, %grad_self.1, %outgate)
   %49 : Tensor = prim::SumToSize(%48, %19)
   %50 : Tensor = prim::SumToSize(%48, %17)
   %51 : Tensor = prim::SumToSize(%48, %14)
@@ -61,7 +61,7 @@ graph(%0 : Float(*, *)
   %60 : Float(*, *) = aten::t(%11)
   %61 : Float(*, *) = aten::mm(%60, %51)
   %62 : Float(*, *) = aten::t(%61)
-  return (%47, %49, %50, %54, %57, %59, %62);
+  return (%grad_other.5, %49, %50, %54, %57, %59, %62);
 }
 with prim::FusionGroup_0 = graph(%0 : Float(*, *)
       %1 : Tensor
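The changes in this expect file (and the two below) are pure renames: intermediate gradient values that previously got numeric names (%29, %32, ...) now print as grad_self.N / grad_other.N. That is what you would expect if the gradient formulas are written in TorchScript with named locals, roughly in this hypothetical style:

# Hedged illustration of a script-defined gradient for aten::mul; the
# local names grad_self / grad_other are what surface in the IR above.
import torch

@torch.jit.script
def mul_backward(grad_output, self_t, other):
    grad_self = (grad_output * other).sum_to_size(self_t.size())
    grad_other = (grad_output * self_t).sum_to_size(other.size())
    return grad_self, grad_other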
40 changes: 20 additions & 20 deletions test/expect/TestFuser.test_milstm_cuda-backward.expect
@@ -33,58 +33,58 @@ graph(%0 : Float(*, *)
   %32 : int = prim::Constant[value=1]()
   %33 : Float(*, *) = aten::mul(%0, %31)
   %34 : int[] = aten::size(%outgate)
-  %35 : Tensor = aten::sum_to_size(%33, %34)
+  %grad_self.1 : Tensor = aten::sum_to_size(%33, %34)
   %36 : Float(*, *) = aten::mul(%0, %outgate)
   %37 : int[] = aten::size(%31)
-  %38 : Tensor = aten::sum_to_size(%36, %37)
-  %39 : Tensor = prim::FusionGroup_0(%1, %38, %31)
+  %grad_other.1 : Tensor = aten::sum_to_size(%36, %37)
+  %39 : Tensor = prim::FusionGroup_0(%1, %grad_other.1, %31)
   %40 : Tensor = prim::SumToSize(%39, %29)
   %41 : Tensor = prim::SumToSize(%39, %30)
   %42 : Tensor = aten::mul(%41, %cellgate)
   %43 : int[] = aten::size(%ingate)
-  %44 : Tensor = aten::sum_to_size(%42, %43)
+  %grad_self.3 : Tensor = aten::sum_to_size(%42, %43)
   %45 : Tensor = aten::mul(%41, %ingate)
   %46 : int[] = aten::size(%cellgate)
-  %47 : Tensor = aten::sum_to_size(%45, %46)
+  %grad_other.3 : Tensor = aten::sum_to_size(%45, %46)
   %48 : Tensor = aten::mul(%40, %10)
   %49 : int[] = aten::size(%forgetgate)
-  %50 : Tensor = aten::sum_to_size(%48, %49)
-  %51 : Tensor = prim::FusionGroup_1(%44, %ingate, %50, %forgetgate, %47, %cellgate, %35, %outgate)
+  %grad_self.5 : Tensor = aten::sum_to_size(%48, %49)
+  %51 : Tensor = prim::FusionGroup_1(%grad_self.3, %ingate, %grad_self.5, %forgetgate, %grad_other.3, %cellgate, %grad_self.1, %outgate)
   %52 : Tensor = prim::SumToSize(%51, %24)
   %53 : Tensor = prim::SumToSize(%51, %22)
   %54 : Tensor = aten::mul(%53, %Uz)
   %55 : int[] = aten::size(%11)
-  %56 : Tensor = aten::sum_to_size(%54, %55)
+  %grad_self.7 : Tensor = aten::sum_to_size(%54, %55)
   %57 : Tensor = aten::mul(%53, %11)
   %58 : int[] = aten::size(%Uz)
-  %59 : Tensor = aten::sum_to_size(%57, %58)
+  %grad_other.7 : Tensor = aten::sum_to_size(%57, %58)
   %60 : Tensor = prim::SumToSize(%51, %19)
   %61 : Tensor = prim::SumToSize(%51, %20)
   %62 : Tensor = aten::mul(%61, %Wx)
   %63 : int[] = aten::size(%12)
-  %64 : Tensor = aten::sum_to_size(%62, %63)
+  %grad_self.9 : Tensor = aten::sum_to_size(%62, %63)
   %65 : Tensor = aten::mul(%61, %12)
   %66 : int[] = aten::size(%Wx)
-  %67 : Tensor = aten::sum_to_size(%65, %66)
+  %grad_other.9 : Tensor = aten::sum_to_size(%65, %66)
   %68 : Tensor = aten::mul(%60, %Uz)
   %69 : int[] = aten::size(%18)
-  %70 : Tensor = aten::sum_to_size(%68, %69)
+  %grad_self.11 : Tensor = aten::sum_to_size(%68, %69)
   %71 : Tensor = aten::mul(%60, %18)
-  %72 : Tensor = aten::sum_to_size(%71, %58)
-  %73 : Tensor = aten::add(%59, %72, %32)
-  %74 : Tensor = aten::mul(%70, %Wx)
+  %grad_other.11 : Tensor = aten::sum_to_size(%71, %58)
+  %73 : Tensor = aten::add(%grad_other.7, %grad_other.11, %32)
+  %74 : Tensor = aten::mul(%grad_self.11, %Wx)
   %75 : int[] = aten::size(%13)
-  %76 : Tensor = aten::sum_to_size(%74, %75)
-  %77 : Tensor = aten::mul(%70, %13)
-  %78 : Tensor = aten::sum_to_size(%77, %66)
-  %79 : Tensor = aten::add(%67, %78, %32)
+  %grad_self.13 : Tensor = aten::sum_to_size(%74, %75)
+  %77 : Tensor = aten::mul(%grad_self.11, %13)
+  %grad_other.13 : Tensor = aten::sum_to_size(%77, %66)
+  %79 : Tensor = aten::add(%grad_other.9, %grad_other.13, %32)
   %80 : Float(*, *) = aten::t(%14)
   %81 : Float(*, *) = aten::mm(%80, %73)
   %82 : Float(*, *) = aten::t(%81)
   %83 : Float(*, *) = aten::t(%15)
   %84 : Float(*, *) = aten::mm(%83, %79)
   %85 : Float(*, *) = aten::t(%84)
-  return (%52, %56, %64, %76, %82, %85);
+  return (%52, %grad_self.7, %grad_self.9, %grad_self.13, %82, %85);
 }
 with prim::FusionGroup_0 = graph(%0 : Float(*, *)
       %1 : Tensor
34 changes: 17 additions & 17 deletions test/expect/TestJit.test_cpp_cuda.expect
@@ -115,29 +115,29 @@ graph(%0 : Float(2, 3, 4)
       %12 : Tensor = prim::SumToSize(%11, %6)
       -> (%10, %12)
     }
-  %13 : Tensor, %14 : Tensor = prim::GradOf[name="aten::mul"](%8)
+  %grad_self.2 : Tensor, %grad_other.2 : Tensor = prim::GradOf[name="aten::mul"](%8)
     block0() {
       %15 : Tensor = aten::mul(%8, %2)
       %16 : int[] = aten::size(%4)
-      %17 : Tensor = aten::sum_to_size(%15, %16)
+      %grad_self.1 : Tensor = aten::sum_to_size(%15, %16)
       %18 : Tensor = aten::mul(%8, %4)
       %19 : int[] = aten::size(%2)
-      %20 : Tensor = aten::sum_to_size(%18, %19)
-      -> (%17, %20)
+      %grad_other.1 : Tensor = aten::sum_to_size(%18, %19)
+      -> (%grad_self.1, %grad_other.1)
     }
-  %21 : Tensor = prim::AutogradAdd(%1, %13)
-  %22 : Tensor, %23 : Tensor = prim::GradOf[name="aten::mul"](%21)
+  %21 : Tensor = prim::AutogradAdd(%1, %grad_self.2)
+  %grad_self : Tensor, %grad_other : Tensor = prim::GradOf[name="aten::mul"](%21)
     block0() {
       %24 : Tensor = aten::mul(%21, %3)
       %25 : int[] = aten::size(%2)
-      %26 : Tensor = aten::sum_to_size(%24, %25)
+      %grad_self.3 : Tensor = aten::sum_to_size(%24, %25)
       %27 : Tensor = aten::mul(%21, %2)
       %28 : int[] = aten::size(%3)
-      %29 : Tensor = aten::sum_to_size(%27, %28)
-      -> (%26, %29)
+      %grad_other.3 : Tensor = aten::sum_to_size(%27, %28)
+      -> (%grad_self.3, %grad_other.3)
     }
-  %30 : Tensor = prim::AutogradAdd(%14, %22)
-  %31 : Tensor = prim::AutogradAdd(%9, %23)
+  %30 : Tensor = prim::AutogradAdd(%grad_other.2, %grad_self)
+  %31 : Tensor = prim::AutogradAdd(%9, %grad_other)
   return (%30, %31);
 }

@@ -165,24 +165,24 @@ graph(%0 : Float(*)
       %8 : Tensor = prim::SumToSize(%0, %4)
       -> (%8)
     }
-  %9 : Tensor, %10 : Tensor = prim::GradOf[name="aten::mul"](%7)
+  %grad_self : Tensor, %grad_other : Tensor = prim::GradOf[name="aten::mul"](%7)
     block0() {
       %11 : Tensor = aten::mul(%7, %2)
       %12 : int[] = aten::size(%3)
-      %13 : Tensor = aten::sum_to_size(%11, %12)
+      %grad_self.1 : Tensor = aten::sum_to_size(%11, %12)
       %14 : Tensor = aten::mul(%7, %3)
       %15 : int[] = aten::size(%2)
-      %16 : Tensor = aten::sum_to_size(%14, %15)
-      -> (%13, %16)
+      %grad_other.1 : Tensor = aten::sum_to_size(%14, %15)
+      -> (%grad_self.1, %grad_other.1)
     }
-  %17 : Tensor = prim::AutogradAdd(%1, %9)
+  %17 : Tensor = prim::AutogradAdd(%1, %grad_self)
   %18 : Tensor = prim::GradOf[name="aten::add"](%17)
     block0() {
       %19 : Tensor = aten::mul(%17, %6)
       %20 : Tensor = prim::SumToSize(%19, %5)
       -> (%20)
     }
-  %21 : Tensor = prim::AutogradAdd(%10, %18)
+  %21 : Tensor = prim::AutogradAdd(%grad_other, %18)
   return (%21);
 }

4 changes: 3 additions & 1 deletion test/test_jit.py
@@ -259,7 +259,9 @@ def copy_structure_and_params(m):
         try:
             pp, constant_table = module._python_print()
         except RuntimeError as e:
-            if "could not export python function" not in str(e):
+            se = str(e)
+            if "could not export python function" not in se and \
+                    "closures are not exportable" not in se:
                 raise
             else:
                 return
2 changes: 1 addition & 1 deletion torch/csrc/jit/passes/alias_analysis.cpp
@@ -312,6 +312,7 @@ void AliasDb::analyze(Node* node) {
     case prim::None:
     case prim::BroadcastSizes:
     case prim::ChunkSizes:
+    case prim::Function:
      return analyzeCreator(node);
    case prim::TupleUnpack:
    case prim::TupleIndex:
@@ -591,6 +592,5 @@ void AliasDb::giveFreshAlias(const Value* value) {
   }
   addAlias(value, getFreshAlias());
 }
-
 } // namespace jit
 } // namespace torch
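For alias analysis, prim::Function is routed to analyzeCreator: the closure value the node produces is treated as a freshly created object with its own alias set, not as something that may alias the node's inputs.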
11 changes: 11 additions & 0 deletions torch/csrc/jit/passes/python_print.cpp
@@ -768,6 +768,17 @@ struct PythonPrintPass {
       }
       stmt << ")";
     } break;
+    case prim::Function: {
+      if (enforce_importable_) {
+        throw script::ErrorReport(node->getSourceLocation()) << "closures are not exportable";
+      }
+      auto name = genMethodName("__lambda");
+      std::shared_ptr<Graph> graph = node->g(attr::Subgraph);
+      worklist.emplace_back([graph, name, this] {
+        printFunctionDefinition(*graph, name);
+      });
+      stmt << "self." << name;
+    } break;
     default: {
       Symbol kind = node->kind();
       if (kind.is_aten()) {
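The new prim::Function case prints a closure as a generated self.__lambda... method, but refuses when printing for export (enforce_importable_); the extra error string tolerated in test_jit.py above comes from this path. A hedged sketch of how that failure would surface (the module body is hypothetical):

import torch

class M(torch.jit.ScriptModule):
    @torch.jit.script_method
    def forward(self, x):
        def scale(y):    # lowered to prim::Function
            return y * 2
        return x

m = M()
try:
    # Serialization python-prints the module with enforce_importable_
    # set, so the closure should hit the new error, not broken output.
    m.save("m.pt")
except RuntimeError as e:
    assert "closures are not exportable" in str(e)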