diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
index 84559e9edb9a38..3a5628d7f57e47 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -31,21 +31,21 @@ DEFINE_STRUCT(bool)
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <2 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-128-NEXT: ret <vscale x 2 x i64> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_int64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <4 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-256-NEXT: ret <vscale x 2 x i64> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_int64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <8 x i64>* [[ARRAYIDX]] to <vscale x 2 x i64>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-512-NEXT: ret <vscale x 2 x i64> [[TMP1]]
 //
 svint64_t read_int64(struct struct_int64 *s) {
@@ -55,31 +55,31 @@ svint64_t read_int64(struct struct_int64 *s) {
 // CHECK-128-LABEL: @write_int64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-128-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <2 x i64>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x i64> [[TMP1]], <2 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: store <2 x i64> [[TMP1]], <2 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_int64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-256-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
+// CHECK-256-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <4 x i64>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <4 x i64>, <4 x i64>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <4 x i64> [[TMP1]], <4 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: store <4 x i64> [[TMP1]], <4 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_int64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-512-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 2 x i64> [[X:%.*]], <vscale x 2 x i64>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[X_ADDR]] to <8 x i64>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_INT64:%.*]], %struct.struct_int64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <8 x i64> [[TMP1]], <8 x i64>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: store <8 x i64> [[TMP1]], <8 x i64>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_int64(struct struct_int64 *s, svint64_t x) {
@@ -94,21 +94,21 @@ void write_int64(struct struct_int64 *s, svint64_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <2 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 2 x double> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_float64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <4 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: ret <vscale x 2 x double> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_float64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[ARRAYIDX]] to <vscale x 2 x double>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 2 x double> [[TMP1]]
 //
 svfloat64_t read_float64(struct struct_float64 *s) {
@@ -118,31 +118,31 @@ svfloat64_t read_float64(struct struct_float64 *s) {
 // CHECK-128-LABEL: @write_float64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-128-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <2 x double>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x double> [[TMP1]], <2 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: store <2 x double> [[TMP1]], <2 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_float64(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-256-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-256-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <4 x double>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <4 x double>, <4 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <4 x double> [[TMP1]], <4 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: store <4 x double> [[TMP1]], <4 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_float64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-512-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 2 x double> [[X:%.*]], <vscale x 2 x double>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[X_ADDR]] to <8 x double>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_FLOAT64:%.*]], %struct.struct_float64* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <8 x double> [[TMP1]], <8 x double>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: store <8 x double> [[TMP1]], <8 x double>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_float64(struct struct_float64 *s, svfloat64_t x) {
@@ -157,21 +157,21 @@ void write_float64(struct struct_float64 *s, svfloat64_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bfloat16(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <16 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bfloat16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <32 x bfloat>* [[ARRAYIDX]] to <vscale x 8 x bfloat>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[TMP1]]
 //
 svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
@@ -181,31 +181,31 @@ svbfloat16_t read_bfloat16(struct struct_bfloat16 *s) {
 // CHECK-128-LABEL: @write_bfloat16(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-128-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <8 x bfloat>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <8 x bfloat> [[TMP1]], <8 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: store <8 x bfloat> [[TMP1]], <8 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bfloat16(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-256-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-256-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <16 x bfloat>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <16 x bfloat>, <16 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-256-NEXT: store <16 x bfloat> [[TMP1]], <16 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: store <16 x bfloat> [[TMP1]], <16 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bfloat16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-512-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 8 x bfloat> [[X:%.*]], <vscale x 8 x bfloat>* [[X_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[X_ADDR]] to <32 x bfloat>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BFLOAT16:%.*]], %struct.struct_bfloat16* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-512-NEXT: store <32 x bfloat> [[TMP1]], <32 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: store <32 x bfloat> [[TMP1]], <32 x bfloat>* [[ARRAYIDX]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
@@ -220,21 +220,21 @@ void write_bfloat16(struct struct_bfloat16 *s, svbfloat16_t x) {
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <2 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <4 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
 // CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[ARRAYIDX]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP0]], align 2, [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_bool(struct struct_bool *s) {
@@ -244,33 +244,33 @@ svbool_t read_bool(struct struct_bool *s) {
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA15:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
-// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* [[ARRAYIDX]], align 2, [[TBAA2]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* [[ARRAYIDX]], align 2, [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT: entry:
 // CHECK-256-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
+// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA15:!tbaa !.*]]
 // CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to i32*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-256-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1
 // CHECK-256-NEXT: [[TMP2:%.*]] = bitcast [3 x <4 x i8>]* [[Y]] to i32*
-// CHECK-256-NEXT: store i32 [[TMP1]], i32* [[TMP2]], align 2, [[TBAA2]]
+// CHECK-256-NEXT: store i32 [[TMP1]], i32* [[TMP2]], align 2, [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[X_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[X_ADDR]], align 16, [[TBAA15:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[X_ADDR]] to i64*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-512-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1
 // CHECK-512-NEXT: [[TMP2:%.*]] = bitcast [3 x <8 x i8>]* [[Y]] to i64*
-// CHECK-512-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 2, [[TBAA2]]
+// CHECK-512-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 2, [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
 void write_bool(struct struct_bool *s, svbool_t x) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
index 1c08e46681fbc8..5442d58e96bead 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -30,13 +30,13 @@ svint32_t sizeless_callee(svint32_t x) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA2:!tbaa !.*]]
-// CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[X_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[X_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[X_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5:!tbaa !.*]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP3]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -52,7 +52,7 @@ fixed_int32_t fixed_caller(fixed_int32_t x) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[X]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[X_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[X1:%.*]] = load <16 x i32>, <16 x i32>* [[X]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[X1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -68,19 +68,19 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
 // CHECK-NEXT: [[COERCE_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[COERCE1:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <16 x i32>, align 64
-// CHECK-NEXT: store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[X_ADDR]], align 16, [[TBAA5]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[X_ADDR]], align 16, [[TBAA9]]
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[X_ADDR]] to <16 x i32>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[COERCE_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[COERCE_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[COERCE_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[COERCE_COERCE]], align 16
 // CHECK-NEXT: [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[TMP2]])
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP3]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[TMP4]], <16 x i32>* [[SAVED_CALL_RVALUE]], align 64, [[TBAA2]]
+// CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[TMP4]], <16 x i32>* [[SAVED_CALL_RVALUE]], align 64, [[TBAA6]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <16 x i32>* [[SAVED_CALL_RVALUE]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[CASTFIXEDSVE]], align 64, [[TBAA2]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[CASTFIXEDSVE]], align 64, [[TBAA6]]
 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP5]]
 //
 svint32_t sizeless_caller(svint32_t x) {
@@ -101,21 +101,21 @@ svint32_t sizeless_caller(svint32_t x) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[OP2]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[OP2_COERCE:%.*]], <vscale x 4 x i32>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[OP22]], <16 x i32>* [[OP2_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP22:%.*]] = load <16 x i32>, <16 x i32>* [[OP2]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[OP22]], <16 x i32>* [[OP2_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP2]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP2]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32>* [[OP2_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP4]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP4]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP6]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i32> [[TMP5]])
-// CHECK-NEXT: store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TMP7]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP8:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP8]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -135,21 +135,21 @@ fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
 // CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[OP2]] to <vscale x 2 x double>*
 // CHECK-NEXT: store <vscale x 2 x double> [[OP2_COERCE:%.*]], <vscale x 2 x double>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <8 x double> [[OP22]], <8 x double>* [[OP2_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP22:%.*]] = load <8 x double>, <8 x double>* [[OP2]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <8 x double> [[OP22]], <8 x double>* [[OP2_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP2]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP2]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x double>* [[OP2_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP4]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP4]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP6]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x double> [[TMP5]])
-// CHECK-NEXT: store <vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-NEXT: store <vscale x 2 x double> [[TMP7]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT: [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP8:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[TMP8]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -170,23 +170,23 @@ fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1]] to i64*
-// CHECK-NEXT: [[OP113:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP113:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP2]], align 16
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8>* [[OP2]] to i64*
-// CHECK-NEXT: [[OP224:%.*]] = load i64, i64* [[TMP3]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP224:%.*]] = load i64, i64* [[TMP3]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to i64*
-// CHECK-NEXT: store i64 [[OP113]], i64* [[TMP4]], align 16, [[TBAA2]]
+// CHECK-NEXT: store i64 [[OP113]], i64* [[TMP4]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to i64*
-// CHECK-NEXT: store i64 [[OP224]], i64* [[TMP5]], align 16, [[TBAA2]]
+// CHECK-NEXT: store i64 [[OP224]], i64* [[TMP5]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP6]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP6]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP8]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP8]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP10:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP7]], <vscale x 16 x i1> [[TMP9]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP10]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP10]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-NEXT: [[TMP11:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT: [[TMP12:%.*]] = load i64, i64* [[TMP11]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP12:%.*]] = load i64, i64* [[TMP11]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP13:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT: store i64 [[TMP12]], i64* [[TMP13]], align 16
 // CHECK-NEXT: [[TMP14:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -208,15 +208,15 @@ fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[OP1]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP11:%.*]] = load <16 x i32>, <16 x i32>* [[OP1]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[OP11]], <16 x i32>* [[OP1_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[OP1_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP3]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TMP4]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP5]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -234,15 +234,15 @@ fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[OP1]] to <vscale x 2 x double>*
 // CHECK-NEXT: store <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP11:%.*]] = load <8 x double>, <8 x double>* [[OP1]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <8 x double> [[OP11]], <8 x double>* [[OP1_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[OP1_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP3]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7]]
+// CHECK-NEXT: store <vscale x 2 x double> [[TMP4]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[TMP5]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -261,15 +261,15 @@ fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1]] to i64*
-// CHECK-NEXT: [[OP112:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[OP112:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to i64*
-// CHECK-NEXT: store i64 [[OP112]], i64* [[TMP2]], align 16, [[TBAA2]]
+// CHECK-NEXT: store i64 [[OP112]], i64* [[TMP2]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP5]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13]]
 // CHECK-NEXT: [[TMP6:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP7:%.*]] = load i64, i64* [[TMP6]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP8:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT: store i64 [[TMP7]], i64* [[TMP8]], align 16
 // CHECK-NEXT: [[TMP9:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -289,9 +289,9 @@ fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA5]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 4 x i32>* [[SAVED_CALL_RVALUE]] to <16 x i32>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP2]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -307,9 +307,9 @@ fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA7]]
+// CHECK-NEXT: store <vscale x 2 x double> [[TMP1]], <vscale x 2 x double>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA11]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 2 x double>* [[SAVED_CALL_RVALUE]] to <8 x double>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <8 x double>, <8 x double>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[TMP2]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -324,9 +324,9 @@ fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
 // CHECK-NEXT: [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA13]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to i64*
-// CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT: store i64 [[TMP2]], i64* [[TMP3]], align 16
 // CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
index e65537cead104b..17267d6038e491 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -17,10 +17,10 @@ typedef int32_t gnu_int32_t __attribute__((vector_size(N / 8)));
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA2:!tbaa !.*]]
-// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TYPE_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
@@ -31,9 +31,9 @@ svint32_t to_svint32_t(fixed_int32_t type) {
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA5:!tbaa !.*]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[TYPE_ADDR]] to <16 x i32>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -49,10 +49,10 @@ fixed_int32_t from_svint32_t(svint32_t type) {
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <8 x double>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x double>* [[TYPE]] to <vscale x 2 x double>*
 // CHECK-NEXT: store <vscale x 2 x double> [[TYPE_COERCE:%.*]], <vscale x 2 x double>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x double>, <8 x double>* [[TYPE]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <8 x double> [[TYPE1]], <8 x double>* [[TYPE_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x double>* [[TYPE_ADDR]] to <vscale x 2 x double>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
@@ -63,9 +63,9 @@ svfloat64_t to_svfloat64_t(fixed_float64_t type) {
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <vscale x 2 x double>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x double>, align 16
-// CHECK-NEXT: store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-NEXT: store <vscale x 2 x double> [[TYPE:%.*]], <vscale x 2 x double>* [[TYPE_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x double>* [[TYPE_ADDR]] to <8 x double>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x double>, <8 x double>* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 2 x double>* [[RETVAL_COERCE]] to <8 x double>*
 // CHECK-NEXT: store <8 x double> [[TMP1]], <8 x double>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 2 x double>, <vscale x 2 x double>* [[RETVAL_COERCE]], align 16
@@ -82,11 +82,11 @@ fixed_float64_t from_svfloat64_t(svfloat64_t type) {
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT: store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE]] to i64*
-// CHECK-NEXT: [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE12:%.*]] = load i64, i64* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT: store i64 [[TYPE12]], i64* [[TMP2]], align 16, [[TBAA2]]
+// CHECK-NEXT: store i64 [[TYPE12]], i64* [[TMP2]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP3]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
@@ -97,9 +97,9 @@ svbool_t to_svbool_t(fixed_bool_t type) {
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to i64*
-// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i64*
 // CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 16
 // CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -112,10 +112,10 @@ fixed_bool_t from_svbool_t(svbool_t type) {
 // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[TYPE_ADDR]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <16 x i32>* [[TYPE_ADDR]] to <vscale x 4 x i32>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[TMP1]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
 //
 svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
@@ -125,10 +125,10 @@ svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
 // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA5]]
+// CHECK-NEXT: store <vscale x 4 x i32> [[TYPE:%.*]], <vscale x 4 x i32>* [[TYPE_ADDR]], align 16, [[TBAA9]]
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 4 x i32>* [[TYPE_ADDR]] to <16 x i32>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[TMP1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret void
 //
 gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
@@ -138,7 +138,7 @@ gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
 // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 4 x i32>* [[RETVAL_COERCE]] to <16 x i32>*
 // CHECK-NEXT: store <16 x i32> [[TYPE]], <16 x i32>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, <vscale x 4 x i32>* [[RETVAL_COERCE]], align 16
@@ -153,8 +153,8 @@ fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
 // CHECK-NEXT: [[TYPE:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i32>* [[TYPE]] to <vscale x 4 x i32>*
 // CHECK-NEXT: store <vscale x 4 x i32> [[TYPE_COERCE:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA2]]
-// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA2]]
+// CHECK-NEXT: [[TYPE1:%.*]] = load <16 x i32>, <16 x i32>* [[TYPE]], align 16, [[TBAA6]]
+// CHECK-NEXT: store <16 x i32> [[TYPE1]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
 // CHECK-NEXT: ret void
 //
 gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
index 28464ed4af2b72..5babb9c7c410b1 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -22,19 +22,19 @@ fixed_bool_t global_bool;
 // CHECK-128-LABEL: @write_global_i64(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-128-NEXT: store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[V_ADDR]] to <2 x i64>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-128-NEXT: store <2 x i64> [[TMP1]], <2 x i64>* @global_i64, align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 16, [[TBAA10:!tbaa !.*]]
+// CHECK-128-NEXT: store <2 x i64> [[TMP1]], <2 x i64>* @global_i64, align 16, [[TBAA10]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_i64(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 2 x i64>, align 16
-// CHECK-512-NEXT: store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA2:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 2 x i64> [[V:%.*]], <vscale x 2 x i64>* [[V_ADDR]], align 16, [[TBAA6:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 2 x i64>* [[V_ADDR]] to <8 x i64>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-512-NEXT: store <8 x i64> [[TMP1]], <8 x i64>* @global_i64, align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <8 x i64>, <8 x i64>* [[TMP0]], align 16, [[TBAA10:!tbaa !.*]]
+// CHECK-512-NEXT: store <8 x i64> [[TMP1]], <8 x i64>* @global_i64, align 16, [[TBAA10]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_i64(svint64_t v) { global_i64 = v; }
@@ -42,19 +42,19 @@ void write_global_i64(svint64_t v) { global_i64 = v; }
 // CHECK-128-LABEL: @write_global_bf16(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-128-NEXT: store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[V_ADDR]] to <8 x bfloat>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-128-NEXT: store <8 x bfloat> [[TMP1]], <8 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <8 x bfloat>, <8 x bfloat>* [[TMP0]], align 16, [[TBAA10]]
+// CHECK-128-NEXT: store <8 x bfloat> [[TMP1]], <8 x bfloat>* @global_bf16, align 16, [[TBAA10]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bf16(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 8 x bfloat>, align 16
-// CHECK-512-NEXT: store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA7:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 8 x bfloat> [[V:%.*]], <vscale x 8 x bfloat>* [[V_ADDR]], align 16, [[TBAA11:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 8 x bfloat>* [[V_ADDR]] to <32 x bfloat>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-512-NEXT: store <32 x bfloat> [[TMP1]], <32 x bfloat>* @global_bf16, align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <32 x bfloat>, <32 x bfloat>* [[TMP0]], align 16, [[TBAA10]]
+// CHECK-512-NEXT: store <32 x bfloat> [[TMP1]], <32 x bfloat>* @global_bf16, align 16, [[TBAA10]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
@@ -62,19 +62,19 @@ void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT: entry:
 // CHECK-128-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-128-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* @global_bool, align 2, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <2 x i8>, <2 x i8>* [[TMP0]], align 16, [[TBAA10]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP1]], <2 x i8>* @global_bool, align 2, [[TBAA10]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT: entry:
 // CHECK-512-NEXT: [[V_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[V_ADDR]], align 16, [[TBAA13:!tbaa !.*]]
 // CHECK-512-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[V_ADDR]] to i64*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA6]]
-// CHECK-512-NEXT: store i64 [[TMP1]], i64* bitcast (<8 x i8>* @global_bool to i64*), align 2, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 16, [[TBAA10]]
+// CHECK-512-NEXT: store i64 [[TMP1]], i64* bitcast (<8 x i8>* @global_bool to i64*), align 2, [[TBAA10]]
 // CHECK-512-NEXT: ret void
 //
 void write_global_bool(svbool_t v) { global_bool = v; }
@@ -85,36 +85,36 @@ void write_global_bool(svbool_t v) { global_bool = v; }
 // CHECK-128-LABEL: @read_global_i64(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<2 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<2 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA10]]
 // CHECK-128-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_i64(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<8 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 2 x i64>, <vscale x 2 x i64>* bitcast (<8 x i64>* @global_i64 to <vscale x 2 x i64>*), align 16, [[TBAA10]]
 // CHECK-512-NEXT: ret <vscale x 2 x i64> [[TMP0]]
 //
 svint64_t read_global_i64() { return global_i64; }

 // CHECK-128-LABEL: @read_global_bf16(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<8 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<8 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA10]]
 // CHECK-128-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_bf16(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<32 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 8 x bfloat>, <vscale x 8 x bfloat>* bitcast (<32 x bfloat>* @global_bf16 to <vscale x 8 x bfloat>*), align 16, [[TBAA10]]
 // CHECK-512-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
 //
 svbfloat16_t read_global_bf16() { return global_bf16; }

 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<2 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<2 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA10]]
 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP0]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* bitcast (<8 x i8>* @global_bool to <vscale x 16 x i1>*), align 2, [[TBAA10]]
 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP0]]
 //
 svbool_t read_global_bool() { return global_bool; }