Skip to content

[Inductor] [CPU] Torchbench model dlrm performance regression > 10% on ww02.3 #93503

@yudongsi

Description

@yudongsi

🐛 Describe the bug

Compared with the TorchInductor CPU Performance Dashboard on ww02.2, there is a performance regression on the Torchbench model dlrm on ww02.3, as below:

ww02.3 ww02.2
batch_size speedup inductor eager batch_size speedup inductor eager speedup ratio eager ratio inductor ratio
1 1.0664 0.0008148 0.000868903 1 1.1007 0.0007154 0.000787441 0.97 0.91 0.88

WW02.3 SW info:

SW Nightly commit Master/Main commit
Pytorch fac4361 73e5379
Torchbench / 354378b
torchaudio ecc2781 4a037b0
torchtext 112d757 c7cc5fc
torchvision ac06efe 35f68a0
torchdata 049fb62 c0934b9

WW02.2 SW info:

SW Nightly commit Master/Main commit
Pytorch fac4361 73e5379
Torchbench / ff361c6
torchaudio 1c98d76 0be8423
torchtext 6cbfd3e 7c7b640
torchvision b7637f6 0dceac0
torchdata 0d9aa37 0a0ae5d

graph.py

graph.py of this model on ww02.3
GRAPH_INDEX:0
class <lambda>(torch.nn.Module):
    def forward(self, arg0_1: f32[512, 512], arg1_1: f32[512], arg2_1: f32[2310369, 1], arg3_1: f32[64, 512], arg4_1: f32[64], arg5_1: f32[1982689, 1], arg6_1: f32[1000000, 64], arg7_1: f32[1000000, 64], arg8_1: f32[1000000, 64], arg9_1: f32[1000000, 64], arg10_1: f32[1000000, 64], arg11_1: f32[1000000, 64], arg12_1: f32[1000000, 64], arg13_1: f32[1000000, 64], arg14_1: f32[1024, 100], arg15_1: f32[1024], arg16_1: f32[2638049, 1], arg17_1: f32[1024, 1024], arg18_1: f32[1024], arg19_1: f32[3490017, 1], arg20_1: f32[1024, 1024], arg21_1: f32[1024], arg22_1: f32[3490017, 1], arg23_1: f32[1, 1024], arg24_1: f32[1], arg25_1: f32[2310369, 1], arg26_1: f32[2048, 512], arg27_1: i64[8, 2048], arg28_1: i64[204790], arg29_1: i64[204789], arg30_1: i64[204793], arg31_1: i64[204790], arg32_1: i64[204793], arg33_1: i64[204784], arg34_1: i64[204786], arg35_1: i64[204792]):
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:274, code: return layers(x)
        _mkl_linear: f32[2048, 512] = torch.ops.mkl._mkl_linear.default(arg26_1, arg2_1, arg0_1, arg1_1, 2048);  arg26_1 = arg2_1 = arg0_1 = arg1_1 = None
        relu: f32[2048, 512] = torch.ops.aten.relu.default(_mkl_linear);  _mkl_linear = None
        _mkl_linear_1: f32[2048, 64] = torch.ops.mkl._mkl_linear.default(relu, arg5_1, arg3_1, arg4_1, 2048);  relu = arg5_1 = arg3_1 = arg4_1 = None
        relu_1: f32[2048, 64] = torch.ops.aten.relu.default(_mkl_linear_1);  _mkl_linear_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 0)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag = torch.ops.aten._embedding_bag.default(arg6_1, arg28_1, select, False, 0, True, None);  arg6_1 = arg28_1 = select = None
        getitem: f32[2048, 64] = _embedding_bag[0]
        getitem_1: i64[0] = _embedding_bag[1]
        getitem_2: i64[2048] = _embedding_bag[2]
        getitem_3: i64[2048] = _embedding_bag[3];  _embedding_bag = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_1: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 1)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_1 = torch.ops.aten._embedding_bag.default(arg7_1, arg29_1, select_1, False, 0, True, None);  arg7_1 = arg29_1 = select_1 = None
        getitem_4: f32[2048, 64] = _embedding_bag_1[0]
        getitem_5: i64[0] = _embedding_bag_1[1]
        getitem_6: i64[2048] = _embedding_bag_1[2]
        getitem_7: i64[2048] = _embedding_bag_1[3];  _embedding_bag_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_2: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 2)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_2 = torch.ops.aten._embedding_bag.default(arg8_1, arg30_1, select_2, False, 0, True, None);  arg8_1 = arg30_1 = select_2 = None
        getitem_8: f32[2048, 64] = _embedding_bag_2[0]
        getitem_9: i64[0] = _embedding_bag_2[1]
        getitem_10: i64[2048] = _embedding_bag_2[2]
        getitem_11: i64[2048] = _embedding_bag_2[3];  _embedding_bag_2 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_3: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 3)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_3 = torch.ops.aten._embedding_bag.default(arg9_1, arg31_1, select_3, False, 0, True, None);  arg9_1 = arg31_1 = select_3 = None
        getitem_12: f32[2048, 64] = _embedding_bag_3[0]
        getitem_13: i64[0] = _embedding_bag_3[1]
        getitem_14: i64[2048] = _embedding_bag_3[2]
        getitem_15: i64[2048] = _embedding_bag_3[3];  _embedding_bag_3 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_4: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 4)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_4 = torch.ops.aten._embedding_bag.default(arg10_1, arg32_1, select_4, False, 0, True, None);  arg10_1 = arg32_1 = select_4 = None
        getitem_16: f32[2048, 64] = _embedding_bag_4[0]
        getitem_17: i64[0] = _embedding_bag_4[1]
        getitem_18: i64[2048] = _embedding_bag_4[2]
        getitem_19: i64[2048] = _embedding_bag_4[3];  _embedding_bag_4 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_5: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 5)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_5 = torch.ops.aten._embedding_bag.default(arg11_1, arg33_1, select_5, False, 0, True, None);  arg11_1 = arg33_1 = select_5 = None
        getitem_20: f32[2048, 64] = _embedding_bag_5[0]
        getitem_21: i64[0] = _embedding_bag_5[1]
        getitem_22: i64[2048] = _embedding_bag_5[2]
        getitem_23: i64[2048] = _embedding_bag_5[3];  _embedding_bag_5 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_6: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 6)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_6 = torch.ops.aten._embedding_bag.default(arg12_1, arg34_1, select_6, False, 0, True, None);  arg12_1 = arg34_1 = select_6 = None
        getitem_24: f32[2048, 64] = _embedding_bag_6[0]
        getitem_25: i64[0] = _embedding_bag_6[1]
        getitem_26: i64[2048] = _embedding_bag_6[2]
        getitem_27: i64[2048] = _embedding_bag_6[3];  _embedding_bag_6 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_7: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 7);  arg27_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_7 = torch.ops.aten._embedding_bag.default(arg13_1, arg35_1, select_7, False, 0, True, None);  arg13_1 = arg35_1 = select_7 = None
        getitem_28: f32[2048, 64] = _embedding_bag_7[0]
        getitem_29: i64[0] = _embedding_bag_7[1]
        getitem_30: i64[2048] = _embedding_bag_7[2]
        getitem_31: i64[2048] = _embedding_bag_7[3];  _embedding_bag_7 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:306, code: T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
        cat: f32[2048, 576] = torch.ops.aten.cat.default([relu_1, getitem, getitem_4, getitem_8, getitem_12, getitem_16, getitem_20, getitem_24, getitem_28], 1);  getitem = getitem_4 = getitem_8 = getitem_12 = getitem_16 = getitem_20 = getitem_24 = getitem_28 = None
        view: f32[2048, 9, 64] = torch.ops.aten.view.default(cat, [2048, -1, 64]);  cat = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:308, code: Z = torch.bmm(T, torch.transpose(T, 1, 2))
        permute: f32[2048, 64, 9] = torch.ops.aten.permute.default(view, [0, 2, 1])
        bmm: f32[2048, 9, 9] = torch.ops.aten.bmm.default(view, permute);  view = permute = None
        
        # No stacktrace found for following nodes
        _tensor_constant0 = self._tensor_constant0
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:319, code: li = torch.tensor([i for i in range(ni) for j in range(i + offset)], device=x.device)
        lift_fresh_copy: i64[36] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0);  _tensor_constant0 = None
        
        # No stacktrace found for following nodes
        _tensor_constant1 = self._tensor_constant1
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:320, code: lj = torch.tensor([j for i in range(nj) for j in range(i + offset)], device=x.device)
        lift_fresh_copy_1: i64[36] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant1);  _tensor_constant1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:321, code: Zflat = Z[:, li, lj]
        slice_1: f32[2048, 9, 9] = torch.ops.aten.slice.Tensor(bmm, 0, 0, 9223372036854775807);  bmm = None
        index: f32[2048, 36] = torch.ops.aten.index.Tensor(slice_1, [None, lift_fresh_copy, lift_fresh_copy_1]);  slice_1 = lift_fresh_copy = lift_fresh_copy_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:323, code: R = torch.cat([x] + [Zflat], dim=1)
        cat_1: f32[2048, 100] = torch.ops.aten.cat.default([relu_1, index], 1);  relu_1 = index = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:274, code: return layers(x)
        _mkl_linear_2: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(cat_1, arg16_1, arg14_1, arg15_1, 2048);  cat_1 = arg16_1 = arg14_1 = arg15_1 = None
        relu_2: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_2);  _mkl_linear_2 = None
        _mkl_linear_3: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(relu_2, arg19_1, arg17_1, arg18_1, 2048);  relu_2 = arg19_1 = arg17_1 = arg18_1 = None
        relu_3: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_3);  _mkl_linear_3 = None
        _mkl_linear_4: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(relu_3, arg22_1, arg20_1, arg21_1, 2048);  relu_3 = arg22_1 = arg20_1 = arg21_1 = None
        relu_4: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_4);  _mkl_linear_4 = None
        _mkl_linear_5: f32[2048, 1] = torch.ops.mkl._mkl_linear.default(relu_4, arg25_1, arg23_1, arg24_1, 2048);  relu_4 = arg25_1 = arg23_1 = arg24_1 = None
        relu_5: f32[2048, 1] = torch.ops.aten.relu.default(_mkl_linear_5);  _mkl_linear_5 = None
        return (relu_5,)
        
graph.py of this model on ww02.2
GRAPH_INDEX:0
class <lambda>(torch.nn.Module):
    def forward(self, arg0_1: f32[512, 512], arg1_1: f32[512], arg2_1: f32[2310369, 1], arg3_1: f32[64, 512], arg4_1: f32[64], arg5_1: f32[1982689, 1], arg6_1: f32[1000000, 64], arg7_1: f32[1000000, 64], arg8_1: f32[1000000, 64], arg9_1: f32[1000000, 64], arg10_1: f32[1000000, 64], arg11_1: f32[1000000, 64], arg12_1: f32[1000000, 64], arg13_1: f32[1000000, 64], arg14_1: f32[1024, 100], arg15_1: f32[1024], arg16_1: f32[2638049, 1], arg17_1: f32[1024, 1024], arg18_1: f32[1024], arg19_1: f32[3490017, 1], arg20_1: f32[1024, 1024], arg21_1: f32[1024], arg22_1: f32[3490017, 1], arg23_1: f32[1, 1024], arg24_1: f32[1], arg25_1: f32[2310369, 1], arg26_1: f32[2048, 512], arg27_1: i64[8, 2048], arg28_1: i64[204790], arg29_1: i64[204789], arg30_1: i64[204793], arg31_1: i64[204790], arg32_1: i64[204793], arg33_1: i64[204784], arg34_1: i64[204786], arg35_1: i64[204792]):
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:274, code: return layers(x)
        _mkl_linear: f32[2048, 512] = torch.ops.mkl._mkl_linear.default(arg26_1, arg2_1, arg0_1, arg1_1, 2048);  arg26_1 = arg2_1 = arg0_1 = arg1_1 = None
        relu: f32[2048, 512] = torch.ops.aten.relu.default(_mkl_linear);  _mkl_linear = None
        _mkl_linear_1: f32[2048, 64] = torch.ops.mkl._mkl_linear.default(relu, arg5_1, arg3_1, arg4_1, 2048);  relu = arg5_1 = arg3_1 = arg4_1 = None
        relu_1: f32[2048, 64] = torch.ops.aten.relu.default(_mkl_linear_1);  _mkl_linear_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 0)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag = torch.ops.aten._embedding_bag.default(arg6_1, arg28_1, select, False, 0, True, None);  arg6_1 = arg28_1 = select = None
        getitem: f32[2048, 64] = _embedding_bag[0]
        getitem_1: i64[0] = _embedding_bag[1]
        getitem_2: i64[2048] = _embedding_bag[2]
        getitem_3: i64[2048] = _embedding_bag[3];  _embedding_bag = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_1: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 1)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_1 = torch.ops.aten._embedding_bag.default(arg7_1, arg29_1, select_1, False, 0, True, None);  arg7_1 = arg29_1 = select_1 = None
        getitem_4: f32[2048, 64] = _embedding_bag_1[0]
        getitem_5: i64[0] = _embedding_bag_1[1]
        getitem_6: i64[2048] = _embedding_bag_1[2]
        getitem_7: i64[2048] = _embedding_bag_1[3];  _embedding_bag_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_2: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 2)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_2 = torch.ops.aten._embedding_bag.default(arg8_1, arg30_1, select_2, False, 0, True, None);  arg8_1 = arg30_1 = select_2 = None
        getitem_8: f32[2048, 64] = _embedding_bag_2[0]
        getitem_9: i64[0] = _embedding_bag_2[1]
        getitem_10: i64[2048] = _embedding_bag_2[2]
        getitem_11: i64[2048] = _embedding_bag_2[3];  _embedding_bag_2 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_3: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 3)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_3 = torch.ops.aten._embedding_bag.default(arg9_1, arg31_1, select_3, False, 0, True, None);  arg9_1 = arg31_1 = select_3 = None
        getitem_12: f32[2048, 64] = _embedding_bag_3[0]
        getitem_13: i64[0] = _embedding_bag_3[1]
        getitem_14: i64[2048] = _embedding_bag_3[2]
        getitem_15: i64[2048] = _embedding_bag_3[3];  _embedding_bag_3 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_4: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 4)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_4 = torch.ops.aten._embedding_bag.default(arg10_1, arg32_1, select_4, False, 0, True, None);  arg10_1 = arg32_1 = select_4 = None
        getitem_16: f32[2048, 64] = _embedding_bag_4[0]
        getitem_17: i64[0] = _embedding_bag_4[1]
        getitem_18: i64[2048] = _embedding_bag_4[2]
        getitem_19: i64[2048] = _embedding_bag_4[3];  _embedding_bag_4 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_5: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 5)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_5 = torch.ops.aten._embedding_bag.default(arg11_1, arg33_1, select_5, False, 0, True, None);  arg11_1 = arg33_1 = select_5 = None
        getitem_20: f32[2048, 64] = _embedding_bag_5[0]
        getitem_21: i64[0] = _embedding_bag_5[1]
        getitem_22: i64[2048] = _embedding_bag_5[2]
        getitem_23: i64[2048] = _embedding_bag_5[3];  _embedding_bag_5 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_6: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 6)
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_6 = torch.ops.aten._embedding_bag.default(arg12_1, arg34_1, select_6, False, 0, True, None);  arg12_1 = arg34_1 = select_6 = None
        getitem_24: f32[2048, 64] = _embedding_bag_6[0]
        getitem_25: i64[0] = _embedding_bag_6[1]
        getitem_26: i64[2048] = _embedding_bag_6[2]
        getitem_27: i64[2048] = _embedding_bag_6[3];  _embedding_bag_6 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:288, code: sparse_offset_group_batch = lS_o[k]
        select_7: i64[2048] = torch.ops.aten.select.int(arg27_1, 0, 7);  arg27_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:295, code: V = E(sparse_index_group_batch, sparse_offset_group_batch)
        _embedding_bag_7 = torch.ops.aten._embedding_bag.default(arg13_1, arg35_1, select_7, False, 0, True, None);  arg13_1 = arg35_1 = select_7 = None
        getitem_28: f32[2048, 64] = _embedding_bag_7[0]
        getitem_29: i64[0] = _embedding_bag_7[1]
        getitem_30: i64[2048] = _embedding_bag_7[2]
        getitem_31: i64[2048] = _embedding_bag_7[3];  _embedding_bag_7 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:306, code: T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
        cat: f32[2048, 576] = torch.ops.aten.cat.default([relu_1, getitem, getitem_4, getitem_8, getitem_12, getitem_16, getitem_20, getitem_24, getitem_28], 1);  getitem = getitem_4 = getitem_8 = getitem_12 = getitem_16 = getitem_20 = getitem_24 = getitem_28 = None
        view: f32[2048, 9, 64] = torch.ops.aten.view.default(cat, [2048, -1, 64]);  cat = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:308, code: Z = torch.bmm(T, torch.transpose(T, 1, 2))
        permute: f32[2048, 64, 9] = torch.ops.aten.permute.default(view, [0, 2, 1])
        bmm: f32[2048, 9, 9] = torch.ops.aten.bmm.default(view, permute);  view = permute = None
        
        # No stacktrace found for following nodes
        _tensor_constant0 = self._tensor_constant0
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:319, code: li = torch.tensor([i for i in range(ni) for j in range(i + offset)], device=x.device)
        lift_fresh_copy: i64[36] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0);  _tensor_constant0 = None
        
        # No stacktrace found for following nodes
        _tensor_constant1 = self._tensor_constant1
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:320, code: lj = torch.tensor([j for i in range(nj) for j in range(i + offset)], device=x.device)
        lift_fresh_copy_1: i64[36] = torch.ops.aten.lift_fresh_copy.default(_tensor_constant1);  _tensor_constant1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:321, code: Zflat = Z[:, li, lj]
        slice_1: f32[2048, 9, 9] = torch.ops.aten.slice.Tensor(bmm, 0, 0, 9223372036854775807);  bmm = None
        index: f32[2048, 36] = torch.ops.aten.index.Tensor(slice_1, [None, lift_fresh_copy, lift_fresh_copy_1]);  slice_1 = lift_fresh_copy = lift_fresh_copy_1 = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:323, code: R = torch.cat([x] + [Zflat], dim=1)
        cat_1: f32[2048, 100] = torch.ops.aten.cat.default([relu_1, index], 1);  relu_1 = index = None
        
        # File: /workspace/benchmark/torchbenchmark/models/dlrm/dlrm_s_pytorch.py:274, code: return layers(x)
        _mkl_linear_2: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(cat_1, arg16_1, arg14_1, arg15_1, 2048);  cat_1 = arg16_1 = arg14_1 = arg15_1 = None
        relu_2: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_2);  _mkl_linear_2 = None
        _mkl_linear_3: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(relu_2, arg19_1, arg17_1, arg18_1, 2048);  relu_2 = arg19_1 = arg17_1 = arg18_1 = None
        relu_3: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_3);  _mkl_linear_3 = None
        _mkl_linear_4: f32[2048, 1024] = torch.ops.mkl._mkl_linear.default(relu_3, arg22_1, arg20_1, arg21_1, 2048);  relu_3 = arg22_1 = arg20_1 = arg21_1 = None
        relu_4: f32[2048, 1024] = torch.ops.aten.relu.default(_mkl_linear_4);  _mkl_linear_4 = None
        _mkl_linear_5: f32[2048, 1] = torch.ops.mkl._mkl_linear.default(relu_4, arg25_1, arg23_1, arg24_1, 2048);  relu_4 = arg25_1 = arg23_1 = arg24_1 = None
        relu_5: f32[2048, 1] = torch.ops.aten.relu.default(_mkl_linear_5);  _mkl_linear_5 = None
        return (relu_5,)

Minified repro

python -m torch.backends.xeon.run_cpu --core_list 0 --ncores_per_instance 1 benchmarks/dynamo/torchbench.py --performance --float32 -dcpu --output=inductor_log/ww022.csv -n50 --inductor  --no-skip --dashboard --only dlrm  --cold_start_latency --batch_size 1 --threads 1

cc @ezyang @soumith @msaroufim @wconstab @ngimel @bdhirsh

Metadata

Metadata

Assignees

Type

No type

Projects

Status

Done

Milestone

No milestone

Relationships

None yet

Development

No branches or pull requests

Issue actions