From f808c8a23c293b3aac019653d28d5ff0def54de7 Mon Sep 17 00:00:00 2001 From: Matt Arsenault Date: Sat, 29 Jun 2024 07:53:58 +0200 Subject: [PATCH] AMDGPU: Add baseline test for copysign combine Pre-commit tests showing we try to SimplifyDemandedBits on the sign operand. --- .../AMDGPU/copysign-simplify-demanded-bits.ll | 165 ++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll new file mode 100644 index 0000000000000..1eccb55202f02 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll @@ -0,0 +1,165 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s + +; Test that we use SimplifyDemandedBits on copysign's sign +; operand. These are somewhat simplified extractions from fast pown +; expansions. 
+
+; Scalar f16: the sign operand is (x & 0x8000) bitcast to half, so only the
+; sign bit of %cast_sign is demanded by copysign.
+define half @test_pown_reduced_fast_f16_known_odd(half %x, i32 %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_f16_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX9-NEXT:    v_and_b32_e32 v2, 0xffff8000, v0
+; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX9-NEXT:    v_mul_f16_e64 v0, |v0|, v1
+; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or i32 %y.arg, 1
+  %fabs = call half @llvm.fabs.f16(half %x)
+  %pownI2F = sitofp i32 %y to half
+  %ylogx = fmul half %fabs, %pownI2F
+  %cast_x = bitcast half %x to i16
+  %pow_sign = and i16 %cast_x, -32768
+  %cast_sign = bitcast i16 %pow_sign to half
+  %pow_sign1 = call half @llvm.copysign.f16(half %ylogx, half %cast_sign)
+  ret half %pow_sign1
+}
+
+; Vector v2f16 variant of the test above.
+define <2 x half> @test_pown_reduced_fast_v2f16_known_odd(<2 x half> %x, <2 x i32> %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_v2f16_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
+; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX9-NEXT:    v_and_b32_e32 v3, 0x7fff7fff, v0
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x80008000, v0
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
+; GFX9-NEXT:    s_movk_i32 s4, 0x7fff
+; GFX9-NEXT:    v_pack_b32_f16 v1, v1, v2
+; GFX9-NEXT:    v_pk_mul_f16 v1, v3, v1
+; GFX9-NEXT:    v_bfi_b32 v2, s4, v1, v0
+; GFX9-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
+; GFX9-NEXT:    v_bfi_b32 v0, s4, v1, v0
+; GFX9-NEXT:    s_mov_b32 s4, 0x5040100
+; GFX9-NEXT:    v_perm_b32 v0, v0, v2, s4
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or <2 x i32> %y.arg, <i32 1, i32 1>
+  %fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %x)
+  %pownI2F = sitofp <2 x i32> %y to <2 x half>
+  %ylogx = fmul <2 x half> %fabs, %pownI2F
+  %cast_x = bitcast <2 x half> %x to <2 x i16>
+  %pow_sign = and <2 x i16> %cast_x, <i16 -32768, i16 -32768>
+  %cast_sign = bitcast <2 x i16> %pow_sign to <2 x half>
+  %pow_sign1 = call <2 x half> @llvm.copysign.v2f16(<2 x half> %ylogx, <2 x half> %cast_sign)
+  ret <2 x half> %pow_sign1
+}
+
+; Scalar f32 variant: sign operand is (x & 0x80000000) bitcast to float.
+define float @test_pown_reduced_fast_f32_known_odd(float %x, i32 %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_f32_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v1, 1, v1
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GFX9-NEXT:    v_and_b32_e32 v2, 0x80000000, v0
+; GFX9-NEXT:    s_brev_b32 s4, -2
+; GFX9-NEXT:    v_mul_f32_e64 v0, |v0|, v1
+; GFX9-NEXT:    v_bfi_b32 v0, s4, v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or i32 %y.arg, 1
+  %fabs = call float @llvm.fabs.f32(float %x)
+  %pownI2F = sitofp i32 %y to float
+  %ylogx = fmul float %fabs, %pownI2F
+  %cast_x = bitcast float %x to i32
+  %pow_sign = and i32 %cast_x, -2147483648
+  %cast_sign = bitcast i32 %pow_sign to float
+  %pow_sign1 = call float @llvm.copysign.f32(float %ylogx, float %cast_sign)
+  ret float %pow_sign1
+}
+
+; Vector v2f32 variant of the test above.
+define <2 x float> @test_pown_reduced_fast_v2f32_known_odd(<2 x float> %x, <2 x i32> %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_v2f32_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v3, 1, v3
+; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v3, v3
+; GFX9-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; GFX9-NEXT:    s_brev_b32 s4, -2
+; GFX9-NEXT:    v_mul_f32_e64 v3, |v1|, v3
+; GFX9-NEXT:    v_mul_f32_e64 v2, |v0|, v2
+; GFX9-NEXT:    v_and_b32_e32 v1, 0x80000000, v1
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x80000000, v0
+; GFX9-NEXT:    v_bfi_b32 v0, s4, v2, v0
+; GFX9-NEXT:    v_bfi_b32 v1, s4, v3, v1
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or <2 x i32> %y.arg, <i32 1, i32 1>
+  %fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %x)
+  %pownI2F = sitofp <2 x i32> %y to <2 x float>
+  %ylogx = fmul <2 x float> %fabs, %pownI2F
+  %cast_x = bitcast <2 x float> %x to <2 x i32>
+  %pow_sign = and <2 x i32> %cast_x, <i32 -2147483648, i32 -2147483648>
+  %cast_sign = bitcast <2 x i32> %pow_sign to <2 x float>
+  %pow_sign1 = call <2 x float> @llvm.copysign.v2f32(<2 x float> %ylogx, <2 x float> %cast_sign)
+  ret <2 x float> %pow_sign1
+}
+
+; Scalar f64 variant: sign operand is (x & 0x8000000000000000) bitcast to double.
+define double @test_pown_reduced_fast_f64_known_odd(double %x, i32 %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_f64_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v2, 1, v2
+; GFX9-NEXT:    v_cvt_f64_i32_e32 v[2:3], v2
+; GFX9-NEXT:    s_brev_b32 s4, -2
+; GFX9-NEXT:    v_mul_f64 v[2:3], |v[0:1]|, v[2:3]
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x80000000, v1
+; GFX9-NEXT:    v_bfi_b32 v1, s4, v3, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, v2
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or i32 %y.arg, 1
+  %fabs = call double @llvm.fabs.f64(double %x)
+  %pownI2F = sitofp i32 %y to double
+  %ylogx = fmul double %fabs, %pownI2F
+  %cast_x = bitcast double %x to i64
+  %pow_sign = and i64 %cast_x, -9223372036854775808
+  %cast_sign = bitcast i64 %pow_sign to double
+  %pow_sign1 = call double @llvm.copysign.f64(double %ylogx, double %cast_sign)
+  ret double %pow_sign1
+}
+
+; Vector v2f64 variant of the test above.
+define <2 x double> @test_pown_reduced_fast_v2f64_known_odd(<2 x double> %x, <2 x i32> %y.arg) #0 {
+; GFX9-LABEL: test_pown_reduced_fast_v2f64_known_odd:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    v_or_b32_e32 v6, 1, v5
+; GFX9-NEXT:    v_or_b32_e32 v4, 1, v4
+; GFX9-NEXT:    v_cvt_f64_i32_e32 v[4:5], v4
+; GFX9-NEXT:    v_cvt_f64_i32_e32 v[6:7], v6
+; GFX9-NEXT:    s_brev_b32 s4, -2
+; GFX9-NEXT:    v_mul_f64 v[4:5], |v[0:1]|, v[4:5]
+; GFX9-NEXT:    v_mul_f64 v[6:7], |v[2:3]|, v[6:7]
+; GFX9-NEXT:    v_and_b32_e32 v0, 0x80000000, v3
+; GFX9-NEXT:    v_and_b32_e32 v1, 0x80000000, v1
+; GFX9-NEXT:    v_bfi_b32 v1, s4, v5, v1
+; GFX9-NEXT:    v_bfi_b32 v3, s4, v7, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, v4
+; GFX9-NEXT:    v_mov_b32_e32 v2, v6
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+  %y = or <2 x i32> %y.arg, <i32 1, i32 1>
+  %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %x)
+  %pownI2F = sitofp <2 x i32> %y to <2 x double>
+  %ylogx = fmul <2 x double> %fabs, %pownI2F
+  %cast_x = bitcast <2 x double> %x to <2 x i64>
+  %pow_sign = and <2 x i64> %cast_x, <i64 -9223372036854775808, i64 -9223372036854775808>
+  %cast_sign = bitcast <2 x i64> %pow_sign to <2 x double>
+  %pow_sign1 = call <2 x double> @llvm.copysign.v2f64(<2 x double> %ylogx, <2 x double> %cast_sign)
+  ret <2 x double> %pow_sign1
+}
+
+attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }