46 changes: 46 additions & 0 deletions benchmarks/static_runtime/test_static_runtime.cc
@@ -131,6 +131,52 @@ TEST(StaticRuntime, Max) {
  testStaticRuntime(src_max_pointwise, {input, input_other}, {large_input, large_input_other});
}

TEST(StaticRuntime, Mean) {
  const auto src_default = R"JIT(
    def forward(self, input):
        return torch.mean(input).clone()
  )JIT";
  const auto src_dtype = R"JIT(
    def forward(self, input, dtype: int):
        return torch.mean(input, dtype=dtype).clone()
  )JIT";
  const auto src_dim = R"JIT(
    def forward(self, input, dim: List[int]):
        return torch.mean(input, dim).clone()
  )JIT";
  const auto src_dim_keepdim = R"JIT(
    def forward(self, input, dim: List[int]):
        return torch.mean(input, dim, keepdim=True).clone()
  )JIT";
  const auto src_dim_dtype = R"JIT(
    def forward(self, input, dim: List[int], dtype: int):
        return torch.mean(input, dim, dtype=dtype).clone()
  )JIT";

  auto input = at::randn({2, 3, 2});
  auto large_input = at::randn({8, 7, 6, 8});

  std::vector<IValue> args_default = {input};
  std::vector<IValue> args_dtype = {input, torch::kFloat};
  std::vector<IValue> args_dim = {input, c10::List<int64_t>{0, 2}};
  std::vector<IValue> args_dim_keepdim = {input, c10::List<int64_t>{1, 2}};
  std::vector<IValue> args_dim_dtype = {input, c10::List<int64_t>{0, 1}, torch::kBFloat16};

  testStaticRuntime(src_default, args_default);
  testStaticRuntime(src_dtype, args_dtype);
  testStaticRuntime(src_dim, args_dim);
  testStaticRuntime(src_dim_keepdim, args_dim_keepdim);
  testStaticRuntime(src_dim_dtype, args_dim_dtype);

  // Rerun with larger inputs to exercise output resizing across iterations.
  std::vector<IValue> large_args_dim = {large_input, c10::List<int64_t>{0, 3}};
  std::vector<IValue> large_args_dim_keepdim = {large_input, c10::List<int64_t>{1, 2}};
  std::vector<IValue> large_args_dim_dtype = {large_input, c10::List<int64_t>{1, 3}, torch::kBFloat16};

  testStaticRuntime(src_dim, args_dim, large_args_dim);
  testStaticRuntime(src_dim_keepdim, args_dim_keepdim, large_args_dim_keepdim);
  testStaticRuntime(src_dim_dtype, args_dim_dtype, large_args_dim_dtype);
}

TEST(StaticRuntime, Sigmoid) {
  const auto sigmoid_script = R"JIT(
    def forward(self, inp: Tensor):
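Aside (editorial): outside the testStaticRuntime harness, the out variant wired up in the next file can be sanity-checked directly against the functional op. A minimal sketch, not part of this PR — the CPUFunctions.h include and the standalone main() are assumptions:

#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h> // declares at::cpu::mean_out, as called below

int main() {
  auto x = at::randn({2, 3, 2});
  // Reference result from the functional overload the JIT interpreter uses.
  auto ref = at::mean(x, /*dim=*/{0, 2}, /*keepdim=*/false);
  // Out variant: write into a preallocated tensor, as the new functor does.
  auto out = at::empty({0}, x.options());
  at::cpu::mean_out(out, x, /*dim=*/{0, 2}, /*keepdim=*/false, c10::nullopt);
  TORCH_CHECK(at::allclose(ref, out), "mean_out disagrees with at::mean");
  return 0;
}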
37 changes: 37 additions & 0 deletions torch/csrc/jit/runtime/static/ops.cpp
@@ -1702,6 +1702,43 @@ REGISTER_OPERATOR_FUNCTOR(aten::sum, aten_sum, [](Node* n) -> SROperator {
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::mean, aten_mean, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toDimVector();
      const bool keepdim = p_node->Input(2).toBool();
      const auto dtype = p_node->Input(3).toOptional<at::ScalarType>();
      if (p_node->Output(0).isNone()) {
        // First invocation: allocate the output once, honoring an explicit
        // dtype if one was passed.
        p_node->Output(0) = create_empty_from(
            self, dtype.value_or(self.dtype().toScalarType()));
      }
      auto& output = p_node->Output(0).toTensor();
      // Subsequent invocations reuse the tensor's storage: shrink to zero
      // elements, then let mean_out resize and fill it in place.
      fastResizeToZero(output);
      at::cpu::mean_out(output, self, dim, keepdim, dtype);
    };
  }

  if (n->matches(torch::schema(
          "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dtype = p_node->Input(1).toOptional<at::ScalarType>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = create_empty_from(
            self, dtype.value_or(self.dtype().toScalarType()));
      }
      auto& output = p_node->Output(0).toTensor();
      fastResizeToZero(output);
      // Full reduction: an empty dim list means "reduce over all dims".
      at::cpu::mean_out(output, self, /*dim=*/{}, /*keepdim=*/false, dtype);
    };
  }

  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::repeat, aten_repeat, [](Node* n) -> SROperator {
  if (!n->matches(torch::schema(
          "aten::repeat(Tensor self, int[] repeats) -> Tensor"))) {
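For context on the aten::mean functor above: Static Runtime allocates Output(0) only on the first iteration; later iterations shrink it to zero elements without freeing storage so mean_out can refill it in place. A standalone sketch of that reuse pattern using plain ATen calls — my approximation of create_empty_from/fastResizeToZero, not code from this PR:

#include <ATen/ATen.h>
#include <ATen/CPUFunctions.h> // at::cpu::mean_out (assumed include)

int main() {
  auto self = at::randn({8, 7, 6, 8});
  at::Tensor out; // plays the role of p_node->Output(0)
  for (int iter = 0; iter < 3; ++iter) {
    if (!out.defined()) {
      // First iteration: allocate once, like create_empty_from().
      out = at::empty({0}, self.options());
    }
    // Later iterations: resize to zero elements; storage is retained,
    // like fastResizeToZero(), and mean_out resizes/fills in place.
    out.resize_({0});
    at::cpu::mean_out(out, self, /*dim=*/{0, 3}, /*keepdim=*/false, c10::nullopt);
  }
  return 0;
}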