[FEATURE] Add pytest with benchmarking operator (#21088)
* Add benchmarking operator pytest using benchmark_utils

* Unify quote mark use in benchmark
agrabow committed Jul 15, 2022
1 parent e522bea commit ded6096
Showing 5 changed files with 141 additions and 9 deletions.
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
@@ -300,6 +300,7 @@ List of Contributors
 * [Maria Boerner](https://github.com/mariaboerner1987)
 * [Zhenghui Jin](https://github.com/barry-jin)
 * [Dominika Jedynak](https://github.com/DominikaJedynak)
+* [Adam Grabowski](https://github.com/agrabows)
 
 Label Bot
 ---------
8 changes: 4 additions & 4 deletions benchmark/opperf/rules/default_params.py
@@ -779,10 +779,10 @@
                               "x", "condition", "a", "index", "raveL_data", "label", "grid",
                               "A", "B", "C", "r1", "r2", "rois", "lrs", "wds", "weights_sum_sq",
                               "grads_sum_sq", "mhs", "data1", "data2", "loc", "parameters", "state",
-                              "state_cell"]
+                              "state_cell", "tensor", "arrays", "mask", "running_mean", "running_var"]
 
-PARAMS_OF_TYPE_NP_ARRAY = ["x1", "x2", "prototype", "object", "a", "b", "fill_value", "array", "x", "arr",
+PARAMS_OF_TYPE_NP_ARRAY = ["x1", "x2", "prototype", "source_array", "object", "a", "b", "fill_value", "array", "x", "arr",
                            "values", "ary", "seq", "arrays", "tup", "indices", "m", "ar", "q", "p", "condition",
-                           "arys", "v", "A", "xp", "fp", "data", "mask", "gamma", "beta", "running_mean",
-                           "running_var", "weight", "index", "lhs", "rhs"]
+                           "arys", "v", "A", "xp", "fp", "data", "gamma", "beta", "running_mean", "moving_mean", "moving_var",
+                           "running_var", "weight", "index", "lhs", "rhs", "parameters", "state", "mask", "bias"]

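These name lists feed the input-matching step of run_benchmark_operator (shown in the benchmark_utils.py hunk further down): any operator argument whose name appears in the list for the active frontend is filled with a random tensor of the benchmark shape, so the newly added names ("mask", "running_mean", "moving_var", "source_array", and friends) let the corresponding operators be benchmarked without hand-written inputs. A minimal sketch of that matching, using a hypothetical pick_tensor_args helper and assuming benchmark_utils' arg_list maps each frontend module to one of these lists:

# Hedged sketch (not part of this commit) of how the name lists are consumed.
# Arguments found in additional_inputs keep their explicit value; arguments
# whose names appear in the list receive the benchmark shape (expanded to a
# random ndarray later); everything else is left to the operator's defaults.
PARAMS_OF_TYPE_NP_ARRAY = ["data", "x", "mask", "running_mean", "running_var"]  # excerpt

def pick_tensor_args(op_arg_names, size, additional_inputs):
    inputs = {}
    for arg in op_arg_names:
        if arg in additional_inputs:
            inputs[arg] = additional_inputs[arg]
        elif arg in PARAMS_OF_TYPE_NP_ARRAY:
            inputs[arg] = size
    return inputs

# e.g. masked_softmax under mx.np: "mask" now resolves to a tensor input
print(pick_tensor_args(["data", "mask", "axis"], (128, 128, 128), {"axis": -1}))
# {'data': (128, 128, 128), 'mask': (128, 128, 128), 'axis': -1}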
110 changes: 110 additions & 0 deletions benchmark/opperf/utils/benchmark_operators_pytest.py
@@ -0,0 +1,110 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
import pytest

from benchmark.opperf.utils.benchmark_utils import run_benchmark_operator

# Benchmark cases: each operator name maps to a list of (input_shape, metadata)
# pairs; metadata holds the operator-specific settings that generate_inputs
# below turns into extra keyword/tensor arguments.
test_cases = {
    "reshape" : [((128,128,128), {"newshape": (128,256,-1)}),
                 ((256,256,256), {"newshape": (256,512,-1)}),
                 ((512,512,512), {"newshape": (512,1024,-1)}),],
    "swapaxes" : [((64,128,64), {"axis1": 1, "axis2": 2}),
                  ((128,256,128), {"axis1": 1, "axis2": 2}),
                  ((256,512,256), {"axis1": 1, "axis2": 2})],
    "activation" : [((128,128,128), {"actType": "relu"}),
                    ((256,256,256), {"actType": "relu"}),
                    ((512,512,512), {"actType": "relu"})],
    "batch_norm" : [((128,128,128), {}),
                    ((256,256,256), {}),
                    ((512,512,512), {})],
    "convolution" : [((16,16,16,16,16), {"numFilter": 8, "kernel": (3,3,3)}),
                     ((32,32,16,16,16), {"numFilter": 16, "kernel": (5,5,5)}),
                     ((32,32,32,32,32), {"numFilter": 16, "kernel": (7,7,7)})],
    "add" : [((128,128,128), {}),
             ((256,256,256), {}),
             ((512,512,512), {})],
    "masked_softmax" : [((128,128,128), {}),
                        ((256,256,256), {}),
                        ((512,512,512), {})],
    "slice" : [((128,128,128), {"begin": (32,32,32), "end": (-32,-32,-32)}),
               ((256,256,256), {"begin": (64,64,64), "end": (-64,-64,-64)}),
               ((512,512,512), {"begin": (96,96,96), "end": (-96,-96,-96)})],
    "fully_connected" : [((20,20,20,20), {"numHidden": 30}),
                         ((60,60,60,60), {"numHidden": 60}),
                         ((90,90,90,90), {"numHidden": 90}),],
    "batch_dot" : [((10,10,10), {"matrix1": (20,30), "matrix2": (30,40)}),
                   ((20,20,20), {"matrix1": (40,50), "matrix2": (50,60)}),
                   ((40,40,40), {"matrix1": (60,70), "matrix2": (70,80)})]
}

# Flatten test_cases into (op_name, shape, params) triples for parametrization.
def generate_test_cases():
    tests = []
    for op_name, cases in test_cases.items():
        for case in cases:
            tests.append((op_name, case[0], case[1]))
    return tests

# Build readable pytest ids such as
# "reshape-shape_128x128x128-params_newshape_128x256x-1".
def generate_test_ids():
    test_ids = []
    for op_name, cases in test_cases.items():
        for case in cases:
            s = op_name + "-shape_"
            for i in range(len(case[0])):
                s += str(case[0][i])
                if (i != len(case[0])-1):
                    s += "x"
            params = case[1].items()
            if len(params) != 0:
                s += "-params"
                for key, value in params:
                    s += "_" + str(key) + "_"
                    if isinstance(value, tuple):
                        for i in range(len(value)):
                            s += str(value[i])
                            if (i != len(value)-1):
                                s += "x"
                    else:
                        s += str(value)
            test_ids.append(s)
    return test_ids

# Per-operator extra inputs: each lambda receives (input_shape, metadata) and
# returns the additional arguments the operator needs, mostly shapes for
# auxiliary tensors (weight, bias, running statistics, masks) or raw values.
generate_inputs = {
    "reshape" : lambda shape, metadata: {"newshape": metadata["newshape"], "shape": metadata["newshape"]},
    "swapaxes" : lambda shape, metadata: {"axis1": metadata["axis1"], "axis2": metadata["axis2"],
                                          "dim1": metadata["axis1"], "dim2": metadata["axis2"]},
    "activation" : lambda shape, metadata: {"act_type": metadata["actType"]},
    "batch_norm" : lambda shape, metadata: {"gamma": (shape[1],), "beta": (shape[1],), "running_mean": (shape[1],), "running_var": (shape[1],),
                                            "moving_mean": (shape[1],), "moving_var": (shape[1],)},
    "convolution" : lambda shape, metadata: {"weight": (metadata["numFilter"], shape[1]) + metadata["kernel"], "kernel": metadata["kernel"],
                                             "bias": (metadata["numFilter"],), "num_filter": metadata["numFilter"]},
    "masked_softmax" : lambda shape, metadata: {"mask": mx.np.array(round(mx.np.random.rand(*shape)), dtype="bool")},
    "fully_connected" : lambda shape, metadata: {"weight": (metadata["numHidden"], shape[-1]), "bias": (metadata["numHidden"],),
                                                 "num_hidden": metadata["numHidden"], "flatten": False},
    "batch_dot" : lambda shape, metadata: {"lhs": shape + metadata["matrix1"], "a": shape + metadata["matrix1"],
                                           "rhs": shape + metadata["matrix2"], "b": shape + metadata["matrix2"]},
    "slice" : lambda shape, metadata: {"begin": metadata["begin"], "end": metadata["end"]}
}

# One pytest case per (operator, shape, params) triple, benchmarked through
# run_benchmark_operator with the Python-side profiler.
@pytest.mark.parametrize(argnames=("op_name, shape, params"), argvalues=generate_test_cases(), ids=generate_test_ids())
def test(op_name, shape, params):
    if op_name in generate_inputs.keys():
        additional_inputs = generate_inputs[op_name](shape,params)
    else:
        additional_inputs = {}
    run_benchmark_operator(name=op_name, size=shape, additional_inputs=additional_inputs, profiler="python")
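With the module in place, individual cases can be selected through the generated ids. A usage sketch (not part of the commit; assumes pytest and MXNet are installed and the command is run from the repository root):

# Run one generated benchmark case by matching its id with -k.
import pytest

pytest.main([
    "benchmark/opperf/utils/benchmark_operators_pytest.py",
    "-k", "reshape-shape_128x128x128",  # matches an id built by generate_test_ids()
    "-s",                               # keep the benchmark/profiler output visible
])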
25 changes: 22 additions & 3 deletions benchmark/opperf/utils/benchmark_utils.py
@@ -101,6 +101,25 @@ def get_mx_np_ndarray(ctx, in_tensor, dtype, initializer, attach_grad=True):
     tensor.wait_to_read()
     return tensor
 
+def adjust_op_name(module, name):
+    np_to_nd_func = {
+        "batch_norm": "BatchNorm",
+        "fully_connected": "FullyConnected",
+        "activation": "Activation",
+        "convolution": "Convolution" }
+    nd_to_np_func = {
+        "BatchNorm": "batch_norm",
+        "FullyConnected": "fully_connected",
+        "Activation": "activation",
+        "Convolution": "convolution" }
+
+    if (module == mx.nd and (hasattr(mx.np, name) or hasattr(mx.npx, name)) and name in np_to_nd_func.keys()):
+        return np_to_nd_func[name]
+    elif ((module == mx.np or module == mx.npx) and hasattr(mx.nd, name) and name in nd_to_np_func.keys()):
+        return nd_to_np_func[name]
+    else:
+        return name
+
 def parse_input_ndarray(input_dict):
     """Parse input for ndarray and extract array shape for better readability
@@ -242,18 +261,18 @@ def run_benchmark_operator(name, size = (128,128), additional_inputs = {},
     modules = [mx.nd, mx.np, mx.npx]
     responses = []
     for module in modules:
+        name = adjust_op_name(module, name)
         if hasattr(module, name):
             function = getattr(module, name)
-            args = inspect.getargspec(function).args
+            args = inspect.signature(function).parameters.keys()
             inputs = {}
             for arg in args:
                 if arg in additional_inputs.keys():
                     inputs.update({arg: additional_inputs[arg]})
                 elif arg in arg_list[module]:
                     inputs.update({arg:size})
             res = run_performance_test(function, run_backward=run_backward, dtype=dtype, ctx=ctx,
-                                       inputs=[inputs],
-                                       warmup=warmup, runs=runs, profiler=profiler)
+                                       inputs=[inputs], warmup=warmup, runs=runs, profiler=profiler)
             responses.append(res)
         else:
             responses.append(str(module.__name__) + " does not have operator " + name)
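The new adjust_op_name helper translates between the CamelCase operator names of the legacy mx.nd frontend and the snake_case names of mx.np/mx.npx before the hasattr lookup. A small usage sketch (not part of the commit; assumes an MXNet 2.x build where mx.npx exposes batch_norm and friends):

# Only the four layer operators in the mapping tables are translated;
# every other name is passed through unchanged.
import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import adjust_op_name

print(adjust_op_name(mx.nd, "batch_norm"))       # -> "BatchNorm"
print(adjust_op_name(mx.npx, "FullyConnected"))  # -> "fully_connected"
print(adjust_op_name(mx.np, "reshape"))          # -> "reshape"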
6 changes: 4 additions & 2 deletions benchmark/opperf/utils/ndarray_utils.py
@@ -131,9 +131,11 @@ def get_mx_ndarray(ctx, in_tensor, dtype, initializer, attach_grad=True):
     elif isinstance(in_tensor, list):
         tensor = nd.array(in_tensor, ctx=ctx, dtype=dtype)
     elif isinstance(in_tensor, np.ndarray):
-        tensor = nd.array(in_tensor, ctx=ctx, dtype=dtype)
+        tensor = nd.array(in_tensor)
+    elif isinstance(in_tensor, mx.np.ndarray):
+        tensor = in_tensor.as_nd_ndarray()
     elif isinstance(in_tensor, nd.NDArray):
-        tensor = in_tensor.as_in_context(ctx).astype(dtype=dtype)
+        tensor = in_tensor.as_in_context(ctx)
     else:
         raise ValueError("Invalid input type for creating input tensor. Input can be tuple() of shape or Numpy Array or"
                          " MXNet NDArray. Given - ", in_tensor)
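The updated branch accepts deferred-mode mx.np arrays and no longer forces a device or dtype cast for NumPy and legacy NDArray inputs. A hedged sketch of the resulting dispatch, rewritten as a hypothetical standalone helper rather than the commit's own code:

# Mirrors the isinstance chain above for array-like inputs only.
import mxnet as mx
import numpy as np
from mxnet import nd

def to_legacy_ndarray(in_tensor, ctx):
    # NumPy input: wrapped as-is (the ctx/dtype coercion was dropped here)
    if isinstance(in_tensor, np.ndarray):
        return nd.array(in_tensor)
    # new in this commit: mx.np arrays are converted to the legacy type explicitly
    if isinstance(in_tensor, mx.np.ndarray):
        return in_tensor.as_nd_ndarray()
    # legacy NDArray: only moved to the requested context, no dtype cast
    if isinstance(in_tensor, nd.NDArray):
        return in_tensor.as_in_context(ctx)
    raise ValueError("Invalid input type for creating input tensor", type(in_tensor))

print(type(to_legacy_ndarray(mx.np.ones((2, 3)), mx.cpu())))  # legacy NDArray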
