[Clang] Convert some tests to opaque pointers (NFC)
nikic committed Feb 17, 2023
1 parent afcf70a commit f3fa108
Showing 16 changed files with 333 additions and 354 deletions.
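
The rewrite is mechanical in every file: the -no-opaque-pointers flag is dropped from the RUN line, typed pointers such as i8* and %struct.S* in the CHECK lines become the opaque ptr, and anonymous SSA value numbers shift (for example %3 to %0) because the pointer bitcasts emitted under typed pointers no longer exist. A minimal illustrative sketch, with hypothetical names not taken from any of the changed tests:

// Old RUN line: %clang_cc1 -no-opaque-pointers -triple aarch64-windows -emit-llvm -o - %s | FileCheck %s
// New RUN line: %clang_cc1 -triple aarch64-windows -emit-llvm -o - %s | FileCheck %s
struct S { int a[5]; };   // 20 bytes, returned indirectly via sret on aarch64-windows
S func(S x);
// Before (typed pointers):
//   CHECK: call void {{.*}}func{{.*}}(%struct.S* sret(%struct.S) align 4 %agg.result, %struct.S* noundef %agg.tmp)
// After (opaque pointers):
//   CHECK: call void {{.*}}func{{.*}}(ptr sret(%struct.S) align 4 %agg.result, ptr noundef %agg.tmp)
S f() {
  S x;
  return func(x);
}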
40 changes: 20 additions & 20 deletions clang/test/CodeGen/arm64-microsoft-arguments.cpp
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-windows -ffreestanding -emit-llvm -O0 \
+// RUN: %clang_cc1 -triple aarch64-windows -ffreestanding -emit-llvm -O0 \
// RUN: -x c++ -o - %s | FileCheck %s

// Pass and return for type size <= 8 bytes.
// CHECK: define {{.*}} i64 @{{.*}}f1{{.*}}()
-// CHECK: call i64 {{.*}}func1{{.*}}(i64 %3)
+// CHECK: call i64 {{.*}}func1{{.*}}(i64 %0)
struct S1 {
int a[2];
};
@@ -16,7 +16,7 @@ S1 f1() {

// Pass and return type size <= 16 bytes.
// CHECK: define {{.*}} [2 x i64] @{{.*}}f2{{.*}}()
-// CHECK: call [2 x i64] {{.*}}func2{{.*}}([2 x i64] %3)
+// CHECK: call [2 x i64] {{.*}}func2{{.*}}([2 x i64] %0)
struct S2 {
int a[4];
};
@@ -28,8 +28,8 @@ S2 f2() {
}

// Pass and return for type size > 16 bytes.
-// CHECK: define {{.*}} void @{{.*}}f3{{.*}}(%struct.S3* noalias sret(%struct.S3) align 4 %agg.result)
-// CHECK: call void {{.*}}func3{{.*}}(%struct.S3* sret(%struct.S3) align 4 %agg.result, %struct.S3* noundef %agg.tmp)
+// CHECK: define {{.*}} void @{{.*}}f3{{.*}}(ptr noalias sret(%struct.S3) align 4 %agg.result)
+// CHECK: call void {{.*}}func3{{.*}}(ptr sret(%struct.S3) align 4 %agg.result, ptr noundef %agg.tmp)
struct S3 {
int a[5];
};
@@ -42,8 +42,8 @@ S3 f3() {

// Pass and return aggregate (of size < 16 bytes) with non-trivial destructor.
// Passed directly but returned indirectly.
-// CHECK: define {{.*}} void {{.*}}f4{{.*}}(%struct.S4* inreg noalias sret(%struct.S4) align 4 %agg.result)
-// CHECK: call void {{.*}}func4{{.*}}(%struct.S4* inreg sret(%struct.S4) align 4 %agg.result, [2 x i64] %5)
+// CHECK: define {{.*}} void {{.*}}f4{{.*}}(ptr inreg noalias sret(%struct.S4) align 4 %agg.result)
+// CHECK: call void {{.*}}func4{{.*}}(ptr inreg sret(%struct.S4) align 4 %agg.result, [2 x i64] %0)
struct S4 {
int a[3];
~S4();
@@ -56,8 +56,8 @@ S4 f4() {
}

// Pass and return from instance method called from instance method.
-// CHECK: define {{.*}} void @{{.*}}bar@Q1{{.*}}(%class.Q1* {{[^,]*}} %this, %class.P1* inreg noalias sret(%class.P1) align 1 %agg.result)
-// CHECK: call void {{.*}}foo@P1{{.*}}(%class.P1* noundef{{[^,]*}} %ref.tmp, %class.P1* inreg sret(%class.P1) align 1 %agg.result, i8 %1)
+// CHECK: define {{.*}} void @{{.*}}bar@Q1{{.*}}(ptr {{[^,]*}} %this, ptr inreg noalias sret(%class.P1) align 1 %agg.result)
+// CHECK: call void {{.*}}foo@P1{{.*}}(ptr noundef{{[^,]*}} %ref.tmp, ptr inreg sret(%class.P1) align 1 %agg.result, i8 %0)

class P1 {
public:
@@ -76,7 +76,7 @@ P1 Q1::bar() {

// Pass and return from instance method called from free function.
// CHECK: define {{.*}} void {{.*}}bar{{.*}}()
-// CHECK: call void {{.*}}foo@P2{{.*}}(%class.P2* noundef{{[^,]*}} %ref.tmp, %class.P2* inreg sret(%class.P2) align 1 %retval, i8 %0)
+// CHECK: call void {{.*}}foo@P2{{.*}}(ptr noundef{{[^,]*}} %ref.tmp, ptr inreg sret(%class.P2) align 1 %retval, i8 %0)
class P2 {
public:
P2 foo(P2 x);
@@ -89,8 +89,8 @@ P2 bar() {

// Pass and return an object with a user-provided constructor (passed directly,
// returned indirectly)
-// CHECK: define {{.*}} void @{{.*}}f5{{.*}}(%struct.S5* inreg noalias sret(%struct.S5) align 4 %agg.result)
-// CHECK: call void {{.*}}func5{{.*}}(%struct.S5* inreg sret(%struct.S5) align 4 %agg.result, i64 {{.*}})
+// CHECK: define {{.*}} void @{{.*}}f5{{.*}}(ptr inreg noalias sret(%struct.S5) align 4 %agg.result)
+// CHECK: call void {{.*}}func5{{.*}}(ptr inreg sret(%struct.S5) align 4 %agg.result, i64 {{.*}})
struct S5 {
S5();
int x;
@@ -146,8 +146,8 @@ struct S8 {
int y;
};

-// CHECK: define {{.*}} void {{.*}}?f8{{.*}}(%struct.S8* inreg noalias sret(%struct.S8) align 4 {{.*}})
-// CHECK: call void {{.*}}func8{{.*}}(%struct.S8* inreg sret(%struct.S8) align 4 {{.*}}, i64 {{.*}})
+// CHECK: define {{.*}} void {{.*}}?f8{{.*}}(ptr inreg noalias sret(%struct.S8) align 4 {{.*}})
+// CHECK: call void {{.*}}func8{{.*}}(ptr inreg sret(%struct.S8) align 4 {{.*}}, i64 {{.*}})
S8 func8(S8 x);
S8 f8() {
S8 x;
@@ -157,8 +157,8 @@ S8 f8() {

// Pass and return an object with a non-trivial copy-assignment operator and
// a trivial copy constructor (passed directly, returned indirectly)
-// CHECK: define {{.*}} void @"?f9@@YA?AUS9@@XZ"(%struct.S9* inreg noalias sret(%struct.S9) align 4 {{.*}})
-// CHECK: call void {{.*}}func9{{.*}}(%struct.S9* inreg sret(%struct.S9) align 4 {{.*}}, i64 {{.*}})
+// CHECK: define {{.*}} void @"?f9@@YA?AUS9@@XZ"(ptr inreg noalias sret(%struct.S9) align 4 {{.*}})
+// CHECK: call void {{.*}}func9{{.*}}(ptr inreg sret(%struct.S9) align 4 {{.*}}, i64 {{.*}})
struct S9 {
S9& operator=(const S9&);
int x;
@@ -174,8 +174,8 @@ S9 f9() {

// Pass and return an object with a base class (passed directly, returned
// indirectly).
-// CHECK: define dso_local void {{.*}}f10{{.*}}(%struct.S10* inreg noalias sret(%struct.S10) align 4 {{.*}})
-// CHECK: call void {{.*}}func10{{.*}}(%struct.S10* inreg sret(%struct.S10) align 4 {{.*}}, [2 x i64] {{.*}})
+// CHECK: define dso_local void {{.*}}f10{{.*}}(ptr inreg noalias sret(%struct.S10) align 4 {{.*}})
+// CHECK: call void {{.*}}func10{{.*}}(ptr inreg sret(%struct.S10) align 4 {{.*}}, [2 x i64] {{.*}})
struct S10 : public S1 {
int x;
};
@@ -189,8 +189,8 @@ S10 f10() {

// Pass and return a non aggregate object exceeding > 128 bits (passed
// indirectly, returned indirectly)
-// CHECK: define dso_local void {{.*}}f11{{.*}}(%struct.S11* inreg noalias sret(%struct.S11) align 8 {{.*}})
-// CHECK: call void {{.*}}func11{{.*}}(%struct.S11* inreg sret(%struct.S11) align 8 {{.*}}, %struct.S11* {{.*}})
+// CHECK: define dso_local void {{.*}}f11{{.*}}(ptr inreg noalias sret(%struct.S11) align 8 {{.*}})
+// CHECK: call void {{.*}}func11{{.*}}(ptr inreg sret(%struct.S11) align 8 {{.*}}, ptr {{.*}})
struct S11 {
virtual void f();
int a[5];
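
For quick reference, the argument-passing size classes the arm64-microsoft-arguments.cpp test above exercises, restated as a sketch (illustrative struct names; the behavior is exactly what the test's comments and CHECK lines already show):

// AArch64 Windows aggregate passing, as covered by the test above.
struct A { int a[2]; };          // 8 bytes   -> passed and returned directly as i64
struct B { int a[4]; };          // 16 bytes  -> passed and returned directly as [2 x i64]
struct C { int a[5]; };          // 20 bytes  -> returned indirectly through an sret pointer (now spelled ptr)
struct D { int a[3]; ~D(); };    // non-trivial destructor -> passed directly, returned indirectly (inreg sret)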
36 changes: 18 additions & 18 deletions clang/test/CodeGen/atomic-ops-libcall.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s
+// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s

// FIXME: This file should not be checking -O1 output.
// Ie, it is testing many IR optimizer passes as part of front-end verification.
@@ -10,109 +10,109 @@ enum memory_order {

int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
// CHECK: test_c11_atomic_fetch_add_int_ptr
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 12, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 12, i32 noundef 5)
return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
// CHECK: test_c11_atomic_fetch_sub_int_ptr
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 20, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 20, i32 noundef 5)
return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
// CHECK: test_c11_atomic_fetch_add_int
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 3, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 3, i32 noundef 5)
return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
// CHECK: test_c11_atomic_fetch_sub_int
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 5, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 5, i32 noundef 5)
return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int *fp2a(int **p) {
// CHECK: @fp2a
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 4, i32 noundef 0)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 4, i32 noundef 0)
// Note, the GNU builtins do not multiply by sizeof(T)!
return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

int test_atomic_fetch_add(int *p) {
// CHECK: test_atomic_fetch_add
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_sub(int *p) {
// CHECK: test_atomic_fetch_sub
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_and(int *p) {
// CHECK: test_atomic_fetch_and
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_or(int *p) {
// CHECK: test_atomic_fetch_or
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_xor(int *p) {
// CHECK: test_atomic_fetch_xor
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_nand(int *p) {
// CHECK: test_atomic_fetch_nand
-// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}

int test_atomic_add_fetch(int *p) {
// CHECK: test_atomic_add_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_sub_fetch(int *p) {
// CHECK: test_atomic_sub_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_and_fetch(int *p) {
// CHECK: test_atomic_and_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_or_fetch(int *p) {
// CHECK: test_atomic_or_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_xor_fetch(int *p) {
// CHECK: test_atomic_xor_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_nand_fetch(int *p) {
// CHECK: test_atomic_nand_fetch
-// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+// CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// FIXME: We should not be checking optimized IR. It changes independently of clang.
// FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
// FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
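
One detail worth restating from the checks above: the GNU __atomic_* builtins pass the operand through unscaled, while the C11 __c11_atomic_* builtins on atomic pointers scale it by the size of the pointed-to type, which is why fp2a passes 4 while test_c11_atomic_fetch_sub_int_ptr passes 20. A small illustrative sketch (hypothetical function names; on the armv5e triple from the RUN line these lower to the __atomic_*_4 libcalls, with 5 and 0 being the seq_cst and relaxed orderings):

enum memory_order { memory_order_relaxed = 0, memory_order_seq_cst = 5 };

int *c11_sub(_Atomic(int *) *p) {
  // C11 builtin: pointer arithmetic, so 5 is scaled by sizeof(int) == 4,
  // matching the "i32 noundef 20" checked above.
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int *gnu_sub(int **p) {
  // GNU builtin: no scaling, the raw 4 is passed through, matching fp2a's
  // "i32 noundef 4" even though sizeof(int *) is also 4 on this target.
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}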
