@@ -58,12 +58,12 @@ def test_remove_io_quant_ops_pass__cifarnet():
     )
 
     nodes = list(exec_prog.exported_program().graph.nodes)
-    assert len(nodes) == 17
+    assert len(nodes) == 11
     assert (
         nodes[0].meta["val"].dtype == torch.int8
     ), "Input tensor doesn't have type INT8."
     assert (
-        nodes[16].meta["val"][0].dtype == torch.int8
+        nodes[10].meta["val"][0].dtype == torch.int8
     ), "Output tensor doesn't have type INT8."
 
     assert (
6 changes: 3 additions & 3 deletions backends/nxp/tests/test_integration.py
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -43,8 +43,8 @@ def test_cifarnet():
 
     delegation_info = get_delegation_info(exec_prog.exported_program().graph_module)
     assert delegation_info.num_delegated_subgraphs == 1
-    assert delegation_info.num_non_delegated_nodes == 17
-    assert delegation_info.num_delegated_nodes == 42
+    assert delegation_info.num_non_delegated_nodes == 11

Contributor: Nice!

+    assert delegation_info.num_delegated_nodes == 45
 
     nodes = list(exec_prog.exported_program().graph.nodes)
     assert nodes[2].name == "quantized_decomposed_quantize_per_tensor_default"
Binary file modified examples/nxp/experimental/cifar_net/cifar_net.pth
9 changes: 3 additions & 6 deletions examples/nxp/experimental/cifar_net/cifar_net.py
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -57,7 +57,7 @@ class CifarNetModel(nn.Module):
     def __init__(self):
         super().__init__()
 
-        self.conv1 = nn.Conv2d(8, 32, 5)
+        self.conv1 = nn.Conv2d(3, 32, 5)
         self.conv2 = nn.Conv2d(32, 32, 5)
         self.conv3 = nn.Conv2d(32, 64, 5)
         self.pool1 = nn.MaxPool2d(2, 2)
@@ -66,10 +66,7 @@ def __init__(self):
         self.softmax = nn.Softmax(1)
 
     def forward(self, x):
-
-        # Neutron Backend does not yet have passses for automated padding if number of channels does not
-        # fit to Neutron constrains (#channels == #MAC units). So define the model explicitly tailored for Neutron-C-64.
-        x = F.pad(x, (2, 2, 2, 2, 0, 5))
+        x = F.pad(x, (2, 2, 2, 2))

Contributor: Why is there remaining padding?

Collaborator (Author): The remaining padding ensures that the output of the convolution has the same size as the original x before padding. It has the same effect as using padding="same" in the convolutions.

Collaborator (Author): This type of padding is later fused into the convolution.
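
(Not part of the PR; a minimal sketch illustrating the equivalence described above, assuming a 3x32x32 CIFAR-sized input and the stride-1 5x5 convolution from CifarNet.)

import torch
import torch.nn.functional as F
from torch import nn

x = torch.randn(1, 3, 32, 32)  # assumed CIFAR-sized input, for illustration only

# Explicit spatial padding of 2 on each side, followed by a plain 5x5 convolution...
conv = nn.Conv2d(3, 32, 5)
y_explicit = conv(F.pad(x, (2, 2, 2, 2)))

# ...produces the same output shape as the same convolution with padding="same".
conv_same = nn.Conv2d(3, 32, 5, padding="same")
y_same = conv_same(x)

assert y_explicit.shape == y_same.shape == (1, 32, 32, 32)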

Contributor: Is this part of the original model definition?

Collaborator (@robert-kalmar, Aug 18, 2025): Yes. The previous padding to 8 channels was due to a Neutron NPU constraint. In the meantime, the Neutron converter gained the capability to auto-pad, so it is no longer necessary. For the remaining padding, padding="same" could have been used in the convolution (https://docs.pytorch.org/docs/stable/generated/torch.nn.Conv2d.html).

Filed a ticket here: #13470
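
(For context, not from the PR: a hedged sketch of what the removed six-value F.pad call was doing, assuming a 3x32x32 input. F.pad pads from the last dimension backwards, so the extra pair (0, 5) padded the channel dimension from 3 up to the 8 channels the old nn.Conv2d(8, 32, 5) expected; the remaining two-pair call pads height and width only.)

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)  # assumed CIFAR-sized input

# Old call: (W_left, W_right, H_top, H_bottom, C_front, C_back) pads W, H, and C.
x_old = F.pad(x, (2, 2, 2, 2, 0, 5))
assert x_old.shape == (1, 8, 36, 36)  # channels 3 -> 8 for the old Conv2d(8, 32, 5)

# New call: spatial padding only; Conv2d(3, 32, 5) consumes the 3 channels directly.
x_new = F.pad(x, (2, 2, 2, 2))
assert x_new.shape == (1, 3, 36, 36)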

         x = self.conv1(x)
         x = self.pool1(x)
