Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[SDAG] Refine MMO size when converting masked load/store to normal load/store
After D113888 / 32b6c17 the MMO size of a masked load/store is unknown. When we are converting back to a standard load/store because the mask is known all ones, we can refine that to the correct size from the size of the vector being loaded/stored. Differential Revision: https://reviews.llvm.org/D114582
- Loading branch information
1 parent
e6d0b85
commit 5d7efd4
Showing
2 changed files
with
45 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,38 @@ | ||
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py | ||
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=avx2 -stop-after=finalize-isel | FileCheck %s | ||
|
||
define void @split_masked_store(<8 x double>* %0) { | ||
; CHECK-LABEL: name: split_masked_store | ||
; CHECK: bb.0.entry: | ||
; CHECK-NEXT: liveins: $rdi | ||
; CHECK-NEXT: {{ $}} | ||
; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi | ||
; CHECK-NEXT: [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0 | ||
; CHECK-NEXT: [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool) | ||
; CHECK-NEXT: VMASKMOVPDYmr [[COPY]], 1, $noreg, 32, $noreg, killed [[VMOVAPSYrm]], [[AVX_SET0_]] :: (store unknown-size into %ir.0 + 32, align 8) | ||
; CHECK-NEXT: VMOVUPDYmr [[COPY]], 1, $noreg, 0, $noreg, [[AVX_SET0_]] :: (store (s256) into %ir.0, align 8) | ||
; CHECK-NEXT: RET 0 | ||
entry: | ||
call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> zeroinitializer, <8 x double>* %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>) | ||
ret void | ||
} | ||
|
||
define <8 x double> @split_masked_load(<8 x double>* %0) { | ||
; CHECK-LABEL: name: split_masked_load | ||
; CHECK: bb.0.entry: | ||
; CHECK-NEXT: liveins: $rdi | ||
; CHECK-NEXT: {{ $}} | ||
; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rdi | ||
; CHECK-NEXT: [[VMOVAPSYrm:%[0-9]+]]:vr256 = VMOVAPSYrm $rip, 1, $noreg, %const.0, $noreg :: (load (s256) from constant-pool) | ||
; CHECK-NEXT: [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm killed [[VMOVAPSYrm]], [[COPY]], 1, $noreg, 32, $noreg :: (load unknown-size from %ir.0 + 32, align 8) | ||
; CHECK-NEXT: [[VMOVUPDYrm:%[0-9]+]]:vr256 = VMOVUPDYrm [[COPY]], 1, $noreg, 0, $noreg :: (load (s256) from %ir.0, align 8) | ||
; CHECK-NEXT: $ymm0 = COPY [[VMOVUPDYrm]] | ||
; CHECK-NEXT: $ymm1 = COPY [[VMASKMOVPDYrm]] | ||
; CHECK-NEXT: RET 0, $ymm0, $ymm1 | ||
entry: | ||
%x = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %0, i32 8, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false>, <8 x double> poison) | ||
ret <8 x double> %x | ||
} | ||
|
||
declare <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>) | ||
declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>) |