; Verify register types we generate in PTX.
; RUN: llc -O0 < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
; RUN: llc -O0 < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s
; RUN: llc -O0 < %s -march=nvptx -mcpu=sm_20 | FileCheck %s -check-prefixes=NO8BIT
; RUN: llc -O0 < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s -check-prefixes=NO8BIT

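; The second pair of RUN lines reuses the same compilation but enables only
; the NO8BIT prefixes, so the NO8BIT-NOT check below scans the entire function
; for stray 8-bit register declarations.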
; CHECK-LABEL: .visible .func func()
; NO8BIT-LABEL: .visible .func func()
define void @func() {
entry:
  %s8 = alloca i8, align 1
  %u8 = alloca i8, align 1
  %s16 = alloca i16, align 2
  %u16 = alloca i16, align 2
; Both 8- and 16-bit integers are packed into 16-bit registers.
; CHECK-DAG: .reg .b16 %rs<
; We should not generate 8-bit registers.
; NO8BIT-NOT: .reg .{{[bsu]}}8
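; The {{[bsu]}} character class rejects .b8, .s8, and .u8 declarations alike.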
  %s32 = alloca i32, align 4
  %u32 = alloca i32, align 4
; CHECK-DAG: .reg .b32 %r<
  %s64 = alloca i64, align 8
  %u64 = alloca i64, align 8
; CHECK-DAG: .reg .b64 %rd<
  %f32 = alloca float, align 4
; CHECK-DAG: .reg .f32 %f<
  %f64 = alloca double, align 8
; CHECK-DAG: .reg .f64 %fd<
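; The virtual register classes map to fixed PTX names: %rs for .b16, %r for
; .b32, %rd for .b64, %f for .f32, and %fd for .f64. CHECK-DAG is used above
; because the .reg declarations may be printed in any order.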
; Verify that we use correct register types.
  store i8 1, i8* %s8, align 1
; CHECK: mov.u16 [[R1:%rs[0-9]]], 1;
; CHECK-NEXT: st.u8 {{.*}}, [[R1]]
  store i8 2, i8* %u8, align 1
; CHECK: mov.u16 [[R2:%rs[0-9]]], 2;
; CHECK-NEXT: st.u8 {{.*}}, [[R2]]
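; Note that even i8 values are materialized with mov.u16: the constant lives
; in a 16-bit %rs register and st.u8 truncates it to 8 bits on the store.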
  store i16 3, i16* %s16, align 2
; CHECK: mov.u16 [[R3:%rs[0-9]]], 3;
; CHECK-NEXT: st.u16 {{.*}}, [[R3]]
  store i16 4, i16* %u16, align 2
; CHECK: mov.u16 [[R4:%rs[0-9]]], 4;
; CHECK-NEXT: st.u16 {{.*}}, [[R4]]
  store i32 5, i32* %s32, align 4
; CHECK: mov.u32 [[R5:%r[0-9]]], 5;
; CHECK-NEXT: st.u32 {{.*}}, [[R5]]
  store i32 6, i32* %u32, align 4
; CHECK: mov.u32 [[R6:%r[0-9]]], 6;
; CHECK-NEXT: st.u32 {{.*}}, [[R6]]
  store i64 7, i64* %s64, align 8
; CHECK: mov.u64 [[R7:%rd[0-9]]], 7;
; CHECK-NEXT: st.u64 {{.*}}, [[R7]]
  store i64 8, i64* %u64, align 8
; CHECK: mov.u64 [[R8:%rd[0-9]]], 8;
; CHECK-NEXT: st.u64 {{.*}}, [[R8]]
; FP constants are stored via integer registers, but that's an implementation
; detail that's irrelevant here.
  store float 9.000000e+00, float* %f32, align 4
  store double 1.000000e+01, double* %f64, align 8
; Instead, we force a load into a register and then verify the register type.
  %f32v = load volatile float, float* %f32, align 4
; CHECK: ld.volatile.f32 %f{{[0-9]+}}
  %f64v = load volatile double, double* %f64, align 8
; CHECK: ld.volatile.f64 %fd{{[0-9]+}}
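; The loads are volatile so they cannot be folded away, which guarantees the
; %f and %fd registers actually appear in the output.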
  ret void
; CHECK: ret;
; NO8BIT: ret;
}