Skip to content

Commit

Permalink
[FIX][ONNX][Relay] onnx converter on matmul with scalar; bring back nn.matmul check (#13448)
Browse files Browse the repository at this point in the history

This PR brings 2 bug fixes:
1. ONNX converter for matmul: ONNX matmul follows NumPy [rules](https://numpy.org/doc/stable/reference/generated/numpy.matmul.html):
> If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
> If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed.

My previous fix (#11174) did not consider the second rule (appending a 1 to the dimensions of the rhs vector).

2. Relay's `nn.matmul` takes 2-D matrices, but the corresponding checker was removed in a recent PR (#13287). This PR puts the checker back so that invalid inputs raise a readable TVMError instead of crashing the process (and also so that the CI in ise-uiuc/nnsmith#64 is not terminated while using TVM-10).
  • Loading branch information
ganler committed Nov 21, 2022
1 parent d663207 commit 41b0400
Show file tree
Hide file tree
Showing 4 changed files with 22 additions and 6 deletions.
15 changes: 13 additions & 2 deletions python/tvm/relay/frontend/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -358,8 +358,19 @@ def matmul_out_dtype(inputs, out_dtype):
)
return _op.reshape(output, fold_constant(final_shape))

if a_rank == 1:
return _op.squeeze(_op.nn.matmul(_op.expand_dims(inputs[0], axis=0), inputs[1]), axis=[0])
if a_rank == 1 or b_rank == 1:
axis = []
if a_rank == 1:
lhs = _op.expand_dims(inputs[0], axis=0)
axis.append(0)
else:
lhs = inputs[0]
if b_rank == 1:
rhs = _op.expand_dims(inputs[1], axis=1)
axis.append(-1)
else:
rhs = inputs[1]
return _op.squeeze(_op.nn.matmul(lhs, rhs), axis=axis)

# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(inputs[1], axes=(1, 0))
Expand Down
1 change: 1 addition & 0 deletions src/relay/op/nn/nn.h
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,7 @@ bool MatmulRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
oshape.Set(oshape.size() - 1, tensor_b_elements / dshape[dshape.size() - 1]);
// Otherwise just pull it out of the tensor_b shape directly.
} else {
ICHECK(static_cast<int>(tensor_b->shape.size()) == 2);
if (param->auto_scheduler_rewritten_layout.size() == 0 &&
param->meta_schedule_original_shape.size() == 0) {
// ensure inner dimension matches between data and weight. If one inner
Expand Down
7 changes: 3 additions & 4 deletions tests/python/frontend/onnx/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -1298,10 +1298,7 @@ def test_matmul(target, dev):
"""test_matmul"""

def test_one_matmul(a_shape, b_shape):
if len(a_shape) == 1:
out_shape = [b_shape[1]]
else:
out_shape = [a_shape[0], b_shape[1]]
out_shape = np.matmul(np.zeros(a_shape), np.zeros(b_shape)).shape

a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
Expand All @@ -1323,6 +1320,8 @@ def test_one_matmul(a_shape, b_shape):

test_one_matmul((4, 3), (3, 4))
test_one_matmul((3,), (3, 1))
test_one_matmul((1, 3), (3,))
test_one_matmul((3,), (3,))


@tvm.testing.parametrize_targets
Expand Down
5 changes: 5 additions & 0 deletions tests/python/relay/test_op_level1.py
Original file line number Diff line number Diff line change
Expand Up @@ -602,6 +602,11 @@ def test_matmul_type_check():
y = relay.nn.matmul(x, w)
yy = run_infer_type(y)

i0 = relay.var("i0", shape=(1, 1), dtype="float32")
i1 = relay.var("i1", shape=(1,), dtype="float32")
with pytest.raises(tvm.TVMError):
run_infer_type(relay.nn.matmul(i0, i1))


@tvm.testing.uses_gpu
def test_matmul(executor_kind):
Expand Down

0 comments on commit 41b0400

Please sign in to comment.