diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d4cee26d5f727..71f75fd8a119c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13374,7 +13374,7 @@ struct NodeExtensionHelper {
     unsigned ExtOpc = *SExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
 
     // If we need an extension, we should be changing the type.
-    SDLoc DL(Root);
+    SDLoc DL(OrigOperand);
     auto [Mask, VL] = getMaskAndVL(Root, DAG, Subtarget);
     switch (OrigOperand.getOpcode()) {
     case ISD::ZERO_EXTEND:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index 7bffbaa1c21ea..a4a5917fd4f9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -386,11 +386,11 @@ define <2 x i32> @vwadd_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwadd_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
-; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    vwadd.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -885,11 +885,11 @@ define <2 x i32> @vwadd_v2i32_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwadd_v2i32_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
-; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    vwadd.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -903,11 +903,11 @@ define <2 x i64> @vwadd_v2i64_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwadd_v2i64_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf4 v10, v8
 ; CHECK-NEXT:    vsext.vf4 v11, v9
-; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    vwadd.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -921,11 +921,11 @@ define <2 x i64> @vwadd_v2i64_of_v2i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwadd_v2i64_of_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vle16.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
-; CHECK-NEXT:    vwadd.vv v8, v11, v10
+; CHECK-NEXT:    vwadd.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, ptr %x
   %b = load <2 x i16>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index 8779c6dd9fc38..57a72c639b334 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -386,11 +386,11 @@ define <2 x i32> @vwaddu_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwaddu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    vwaddu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -913,11 +913,11 @@ define <2 x i32> @vwaddu_v2i32_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwaddu_v2i32_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    vwaddu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -931,11 +931,11 @@ define <2 x i64> @vwaddu_v2i64_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwaddu_v2i64_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vzext.vf4 v11, v9
-; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    vwaddu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -949,11 +949,11 @@ define <2 x i64> @vwaddu_v2i64_of_v2i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwaddu_v2i64_of_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vle16.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwaddu.vv v8, v11, v10
+; CHECK-NEXT:    vwaddu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, ptr %x
   %b = load <2 x i16>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index 1e36d8d45ec16..2abd34f01c14c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -416,11 +416,11 @@ define <2 x i32> @vwmul_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwmul_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
-; CHECK-NEXT:    vwmul.vv v8, v11, v10
+; CHECK-NEXT:    vwmul.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index d57c012fec1ea..bff7ef86c2896 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -392,11 +392,11 @@ define <2 x i32> @vwmulu_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwmulu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwmulu.vv v8, v11, v10
+; CHECK-NEXT:    vwmulu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index 858ae573eb406..154093d759d6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -386,11 +386,11 @@ define <2 x i32> @vwsub_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwsub_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vsext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v11, v9
-; CHECK-NEXT:    vwsub.vv v8, v11, v10
+; CHECK-NEXT:    vwsub.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index d2d54796069bb..574c2652ccfac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -386,11 +386,11 @@ define <2 x i32> @vwsubu_v2i32_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwsubu_v2i32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    vwsubu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -900,11 +900,11 @@ define <2 x i32> @vwsubu_v2i32_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwsubu_v2i32_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    vwsubu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -918,11 +918,11 @@ define <2 x i64> @vwsubu_v2i64_of_v2i8(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwsubu_v2i64_of_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
+; CHECK-NEXT:    vle8.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vzext.vf4 v11, v9
-; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    vwsubu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i8>, ptr %x
   %b = load <2 x i8>, ptr %y
@@ -936,11 +936,11 @@ define <2 x i64> @vwsubu_v2i64_of_v2i16(ptr %x, ptr %y) {
 ; CHECK-LABEL: vwsubu_v2i64_of_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vle16.v v8, (a0)
+; CHECK-NEXT:    vle16.v v9, (a1)
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v11, v9
-; CHECK-NEXT:    vwsubu.vv v8, v11, v10
+; CHECK-NEXT:    vwsubu.vv v8, v10, v11
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, ptr %x
   %b = load <2 x i16>, ptr %y