From c882ffc4145f8aee65bce430213cc3b09d8546e6 Mon Sep 17 00:00:00 2001 From: Patrick Mackinlay Date: Tue, 14 Oct 2025 15:09:54 +0700 Subject: [PATCH] 3rdparty/asmjit: sync with upstream 1.20 * from https://github.com/asmjit/asmjit/commit/5134d396bd00c1b63259387acdbb12dfdf009f9b --- 3rdparty/asmjit/CMakeLists.txt | 338 +- 3rdparty/asmjit/LICENSE.md | 2 +- 3rdparty/asmjit/README.md | 70 +- 3rdparty/asmjit/configure.sh | 11 + 3rdparty/asmjit/configure_sanitizers.sh | 15 + 3rdparty/asmjit/configure_vs2022_x64.bat | 2 + 3rdparty/asmjit/configure_vs2022_x86.bat | 2 + 3rdparty/asmjit/src/asmjit.natvis | 264 +- 3rdparty/asmjit/src/asmjit/a64.h | 35 +- 3rdparty/asmjit/src/asmjit/arm.h | 53 +- .../asmjit/src/asmjit/arm/a64archtraits_p.h | 58 +- .../asmjit/src/asmjit/arm/a64assembler.cpp | 3613 +++---- 3rdparty/asmjit/src/asmjit/arm/a64assembler.h | 12 +- 3rdparty/asmjit/src/asmjit/arm/a64builder.cpp | 32 +- 3rdparty/asmjit/src/asmjit/arm/a64builder.h | 8 +- .../asmjit/src/asmjit/arm/a64compiler.cpp | 45 +- 3rdparty/asmjit/src/asmjit/arm/a64compiler.h | 191 +- .../asmjit/src/asmjit/arm/a64emithelper.cpp | 337 +- .../asmjit/src/asmjit/arm/a64emithelper_p.h | 33 +- 3rdparty/asmjit/src/asmjit/arm/a64emitter.h | 1070 +- .../asmjit/src/asmjit/arm/a64formatter.cpp | 37 +- .../asmjit/src/asmjit/arm/a64formatter_p.h | 7 +- 3rdparty/asmjit/src/asmjit/arm/a64func.cpp | 216 +- 3rdparty/asmjit/src/asmjit/arm/a64func_p.h | 6 +- 3rdparty/asmjit/src/asmjit/arm/a64globals.h | 68 +- 3rdparty/asmjit/src/asmjit/arm/a64instapi.cpp | 186 +- 3rdparty/asmjit/src/asmjit/arm/a64instapi_p.h | 12 +- 3rdparty/asmjit/src/asmjit/arm/a64instdb.cpp | 1706 ++-- 3rdparty/asmjit/src/asmjit/arm/a64instdb.h | 25 +- 3rdparty/asmjit/src/asmjit/arm/a64instdb_p.h | 492 +- 3rdparty/asmjit/src/asmjit/arm/a64operand.cpp | 72 +- 3rdparty/asmjit/src/asmjit/arm/a64operand.h | 1393 ++- 3rdparty/asmjit/src/asmjit/arm/a64rapass.cpp | 947 +- 3rdparty/asmjit/src/asmjit/arm/a64rapass_p.h | 44 +- 
.../asmjit/src/asmjit/arm/armformatter.cpp | 296 +- .../asmjit/src/asmjit/arm/armformatter_p.h | 30 +- 3rdparty/asmjit/src/asmjit/arm/armglobals.h | 2 +- 3rdparty/asmjit/src/asmjit/arm/armoperand.h | 396 - 3rdparty/asmjit/src/asmjit/arm/armutils.h | 123 +- .../asmjit/src/asmjit/asmjit-scope-begin.h | 2 +- 3rdparty/asmjit/src/asmjit/asmjit-scope-end.h | 2 +- 3rdparty/asmjit/src/asmjit/asmjit.h | 2 + 3rdparty/asmjit/src/asmjit/core.h | 922 +- 3rdparty/asmjit/src/asmjit/core/api-build_p.h | 6 +- 3rdparty/asmjit/src/asmjit/core/api-config.h | 593 +- 3rdparty/asmjit/src/asmjit/core/archcommons.h | 29 +- .../asmjit/src/asmjit/core/archtraits.cpp | 148 +- 3rdparty/asmjit/src/asmjit/core/archtraits.h | 149 +- 3rdparty/asmjit/src/asmjit/core/arena.cpp | 515 + 3rdparty/asmjit/src/asmjit/core/arena.h | 498 + .../asmjit/src/asmjit/core/arenabitset.cpp | 253 + .../asmjit/src/asmjit/core/arenabitset_p.h | 436 + .../core/{zonehash.cpp => arenahash.cpp} | 126 +- 3rdparty/asmjit/src/asmjit/core/arenahash.h | 198 + .../core/{zonelist.cpp => arenalist.cpp} | 38 +- 3rdparty/asmjit/src/asmjit/core/arenalist.h | 221 + 3rdparty/asmjit/src/asmjit/core/arenapool.h | 66 + .../core/{zonestring.h => arenastring.h} | 59 +- .../core/{zonetree.cpp => arenatree.cpp} | 54 +- 3rdparty/asmjit/src/asmjit/core/arenatree.h | 407 + .../asmjit/src/asmjit/core/arenavector.cpp | 293 + 3rdparty/asmjit/src/asmjit/core/arenavector.h | 625 ++ 3rdparty/asmjit/src/asmjit/core/assembler.cpp | 378 +- 3rdparty/asmjit/src/asmjit/core/assembler.h | 73 +- 3rdparty/asmjit/src/asmjit/core/builder.cpp | 917 +- 3rdparty/asmjit/src/asmjit/core/builder.h | 1037 +- 3rdparty/asmjit/src/asmjit/core/builder_p.h | 17 +- 3rdparty/asmjit/src/asmjit/core/codebuffer.h | 34 +- .../asmjit/src/asmjit/core/codeholder.cpp | 1493 +-- 3rdparty/asmjit/src/asmjit/core/codeholder.h | 1266 ++- .../asmjit/src/asmjit/core/codewriter.cpp | 165 +- .../asmjit/src/asmjit/core/codewriter_p.h | 103 +- 3rdparty/asmjit/src/asmjit/core/compiler.cpp | 673 
+- 3rdparty/asmjit/src/asmjit/core/compiler.h | 586 +- .../asmjit/src/asmjit/core/compilerdefs.h | 224 +- 3rdparty/asmjit/src/asmjit/core/constpool.cpp | 267 +- 3rdparty/asmjit/src/asmjit/core/constpool.h | 101 +- 3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp | 1845 ++-- 3rdparty/asmjit/src/asmjit/core/cpuinfo.h | 1231 ++- .../asmjit/src/asmjit/core/emithelper.cpp | 335 +- .../asmjit/src/asmjit/core/emithelper_p.h | 27 +- 3rdparty/asmjit/src/asmjit/core/emitter.cpp | 352 +- 3rdparty/asmjit/src/asmjit/core/emitter.h | 508 +- .../asmjit/src/asmjit/core/emitterutils.cpp | 92 +- .../asmjit/src/asmjit/core/emitterutils_p.h | 57 +- .../asmjit/src/asmjit/core/environment.cpp | 17 +- 3rdparty/asmjit/src/asmjit/core/environment.h | 288 +- .../asmjit/src/asmjit/core/errorhandler.cpp | 7 +- .../asmjit/src/asmjit/core/errorhandler.h | 40 +- 3rdparty/asmjit/src/asmjit/core/fixup.h | 282 + 3rdparty/asmjit/src/asmjit/core/formatter.cpp | 538 +- 3rdparty/asmjit/src/asmjit/core/formatter.h | 121 +- 3rdparty/asmjit/src/asmjit/core/formatter_p.h | 16 +- 3rdparty/asmjit/src/asmjit/core/func.cpp | 274 +- 3rdparty/asmjit/src/asmjit/core/func.h | 1193 ++- .../src/asmjit/core/funcargscontext.cpp | 383 +- .../src/asmjit/core/funcargscontext_p.h | 227 +- 3rdparty/asmjit/src/asmjit/core/globals.cpp | 24 +- 3rdparty/asmjit/src/asmjit/core/globals.h | 351 +- 3rdparty/asmjit/src/asmjit/core/inst.cpp | 82 +- 3rdparty/asmjit/src/asmjit/core/inst.h | 450 +- 3rdparty/asmjit/src/asmjit/core/instdb.cpp | 125 +- 3rdparty/asmjit/src/asmjit/core/instdb_p.h | 9 +- .../asmjit/src/asmjit/core/jitallocator.cpp | 1441 +-- .../asmjit/src/asmjit/core/jitallocator.h | 281 +- .../asmjit/src/asmjit/core/jitruntime.cpp | 51 +- 3rdparty/asmjit/src/asmjit/core/jitruntime.h | 16 +- 3rdparty/asmjit/src/asmjit/core/logger.cpp | 18 +- 3rdparty/asmjit/src/asmjit/core/logger.h | 50 +- 3rdparty/asmjit/src/asmjit/core/misc_p.h | 2 +- 3rdparty/asmjit/src/asmjit/core/operand.cpp | 118 +- 
3rdparty/asmjit/src/asmjit/core/operand.h | 2459 +++-- 3rdparty/asmjit/src/asmjit/core/osutils.cpp | 19 +- 3rdparty/asmjit/src/asmjit/core/osutils.h | 4 +- 3rdparty/asmjit/src/asmjit/core/osutils_p.h | 4 +- .../asmjit/src/asmjit/core/raassignment_p.h | 468 +- .../asmjit/src/asmjit/core/rabuilders_p.h | 612 -- .../asmjit/src/asmjit/core/racfgblock_p.h | 390 + .../asmjit/src/asmjit/core/racfgbuilder_p.h | 653 ++ .../asmjit/src/asmjit/core/raconstraints_p.h | 65 + 3rdparty/asmjit/src/asmjit/core/radefs_p.h | 921 +- 3rdparty/asmjit/src/asmjit/core/rainst_p.h | 438 + 3rdparty/asmjit/src/asmjit/core/ralocal.cpp | 1267 +-- 3rdparty/asmjit/src/asmjit/core/ralocal_p.h | 233 +- 3rdparty/asmjit/src/asmjit/core/rapass.cpp | 2427 +++-- 3rdparty/asmjit/src/asmjit/core/rapass_p.h | 1211 +-- 3rdparty/asmjit/src/asmjit/core/rareg_p.h | 353 + 3rdparty/asmjit/src/asmjit/core/rastack.cpp | 111 +- 3rdparty/asmjit/src/asmjit/core/rastack_p.h | 102 +- 3rdparty/asmjit/src/asmjit/core/span.h | 385 + 3rdparty/asmjit/src/asmjit/core/string.cpp | 457 +- 3rdparty/asmjit/src/asmjit/core/string.h | 214 +- 3rdparty/asmjit/src/asmjit/core/support.cpp | 653 +- 3rdparty/asmjit/src/asmjit/core/support.h | 2489 ++--- 3rdparty/asmjit/src/asmjit/core/support_p.h | 234 +- 3rdparty/asmjit/src/asmjit/core/target.cpp | 5 +- 3rdparty/asmjit/src/asmjit/core/target.h | 20 +- 3rdparty/asmjit/src/asmjit/core/type.cpp | 80 +- 3rdparty/asmjit/src/asmjit/core/type.h | 315 +- 3rdparty/asmjit/src/asmjit/core/virtmem.cpp | 821 +- 3rdparty/asmjit/src/asmjit/core/virtmem.h | 91 +- 3rdparty/asmjit/src/asmjit/core/zone.cpp | 353 - 3rdparty/asmjit/src/asmjit/core/zone.h | 611 -- 3rdparty/asmjit/src/asmjit/core/zonehash.h | 186 - 3rdparty/asmjit/src/asmjit/core/zonelist.h | 208 - 3rdparty/asmjit/src/asmjit/core/zonestack.cpp | 186 - 3rdparty/asmjit/src/asmjit/core/zonestack.h | 237 - 3rdparty/asmjit/src/asmjit/core/zonetree.h | 376 - .../asmjit/src/asmjit/core/zonevector.cpp | 423 - 
3rdparty/asmjit/src/asmjit/core/zonevector.h | 744 -- 3rdparty/asmjit/src/asmjit/host.h | 33 + 3rdparty/asmjit/src/asmjit/ujit.h | 17 + 3rdparty/asmjit/src/asmjit/ujit/ujitbase.h | 734 ++ 3rdparty/asmjit/src/asmjit/ujit/unicompiler.h | 2097 ++++ .../src/asmjit/ujit/unicompiler_a64.cpp | 4391 ++++++++ .../src/asmjit/ujit/unicompiler_utils_p.h | 32 + .../src/asmjit/ujit/unicompiler_x86.cpp | 7591 ++++++++++++++ .../asmjit/src/asmjit/ujit/unicondition.h | 293 + 3rdparty/asmjit/src/asmjit/ujit/uniop.h | 819 ++ .../asmjit/src/asmjit/ujit/vecconsttable.cpp | 20 + .../asmjit/src/asmjit/ujit/vecconsttable.h | 454 + 3rdparty/asmjit/src/asmjit/x86.h | 35 +- .../asmjit/src/asmjit/x86/x86archtraits_p.h | 129 +- .../asmjit/src/asmjit/x86/x86assembler.cpp | 3263 +++--- 3rdparty/asmjit/src/asmjit/x86/x86assembler.h | 277 +- 3rdparty/asmjit/src/asmjit/x86/x86builder.cpp | 31 +- 3rdparty/asmjit/src/asmjit/x86/x86builder.h | 148 +- .../asmjit/src/asmjit/x86/x86compiler.cpp | 45 +- 3rdparty/asmjit/src/asmjit/x86/x86compiler.h | 658 +- .../asmjit/src/asmjit/x86/x86emithelper.cpp | 696 +- .../asmjit/src/asmjit/x86/x86emithelper_p.h | 105 +- 3rdparty/asmjit/src/asmjit/x86/x86emitter.h | 2096 ++-- .../asmjit/src/asmjit/x86/x86formatter.cpp | 693 +- .../asmjit/src/asmjit/x86/x86formatter_p.h | 18 +- 3rdparty/asmjit/src/asmjit/x86/x86func.cpp | 449 +- 3rdparty/asmjit/src/asmjit/x86/x86func_p.h | 6 +- 3rdparty/asmjit/src/asmjit/x86/x86globals.h | 490 +- 3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp | 1664 +-- 3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h | 14 +- 3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp | 8992 ++++++++--------- 3rdparty/asmjit/src/asmjit/x86/x86instdb.h | 409 +- 3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h | 81 +- 3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h | 46 +- 3rdparty/asmjit/src/asmjit/x86/x86operand.cpp | 210 +- 3rdparty/asmjit/src/asmjit/x86/x86operand.h | 1393 +-- 3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp | 1673 +-- 
3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h | 56 +- .../asmjit/test/asmjit_test_compiler_a64.cpp | 687 -- .../asmjit/test/asmjit_test_compiler_x86.cpp | 4691 --------- 3rdparty/asmjit/test/asmjit_test_execute.cpp | 103 - 3rdparty/asmjit/test/asmjit_test_instinfo.cpp | 191 - 3rdparty/asmjit/test/asmjit_test_perf.cpp | 72 - 3rdparty/asmjit/test/asmjit_test_perf.h | 114 - 3rdparty/asmjit/test/asmjit_test_perf_a64.cpp | 707 -- 3rdparty/asmjit/test/asmjit_test_perf_x86.cpp | 5118 ---------- 3rdparty/asmjit/test/asmjit_test_unit.cpp | 173 - 3rdparty/asmjit/test/asmjitutils.h | 60 - 3rdparty/asmjit/test/performancetimer.h | 33 - .../testing/bench/asmjit_bench_codegen.cpp | 82 + .../testing/bench/asmjit_bench_codegen.h | 117 + .../bench/asmjit_bench_codegen_a64.cpp | 707 ++ .../bench/asmjit_bench_codegen_x86.cpp | 5336 ++++++++++ .../testing/bench/asmjit_bench_overhead.cpp | 481 + .../testing/bench/asmjit_bench_regalloc.cpp | 517 + 3rdparty/asmjit/testing/commons/asmjitutils.h | 242 + .../{test => testing/commons}/cmdline.h | 28 +- .../asmjit/testing/commons/performancetimer.h | 28 + 3rdparty/asmjit/testing/commons/random.h | 77 + .../tests}/asmjit_test_assembler.cpp | 66 +- .../tests}/asmjit_test_assembler.h | 44 +- .../tests}/asmjit_test_assembler_a64.cpp | 66 +- .../tests}/asmjit_test_assembler_x64.cpp | 166 +- .../tests}/asmjit_test_assembler_x86.cpp | 150 +- .../tests}/asmjit_test_compiler.cpp | 189 +- .../tests}/asmjit_test_compiler.h | 28 +- .../tests/asmjit_test_compiler_a64.cpp | 687 ++ .../tests/asmjit_test_compiler_x86.cpp | 4693 +++++++++ .../tests}/asmjit_test_emitters.cpp | 192 +- .../testing/tests/asmjit_test_environment.cpp | 302 + .../testing/tests/asmjit_test_instinfo.cpp | 206 + .../tests}/asmjit_test_misc.h | 162 +- .../testing/tests/asmjit_test_runner.cpp | 171 + .../testing/tests/asmjit_test_unicompiler.cpp | 5674 +++++++++++ .../tests/asmjit_test_unicompiler_avx2fma.cpp | 49 + .../tests/asmjit_test_unicompiler_sse2.cpp | 72 + 
.../tests}/asmjit_test_x86_sections.cpp | 106 +- .../asmjit/{test => testing/tests}/broken.cpp | 38 +- .../asmjit/{test => testing/tests}/broken.h | 4 +- 3rdparty/asmjit/tools/configure-makefiles.sh | 13 - 3rdparty/asmjit/tools/configure-ninja.sh | 13 - 3rdparty/asmjit/tools/configure-sanitizers.sh | 17 - .../asmjit/tools/configure-vs2019-x64.bat | 2 - .../asmjit/tools/configure-vs2019-x86.bat | 2 - .../asmjit/tools/configure-vs2022-x64.bat | 2 - .../asmjit/tools/configure-vs2022-x86.bat | 2 - 3rdparty/asmjit/tools/configure-xcode.sh | 8 - 3rdparty/asmjit/tools/enumgen.js | 15 +- 3rdparty/asmjit/tools/generator-commons.js | 36 +- 3rdparty/asmjit/tools/generator-cxx.js | 10 +- 3rdparty/asmjit/tools/tablegen-a64.js | 13 +- 3rdparty/asmjit/tools/tablegen-x86.js | 433 +- 3rdparty/asmjit/tools/tablegen.js | 165 +- scripts/src/3rdparty.lua | 47 +- src/devices/cpu/drcbearm64.cpp | 247 +- src/devices/cpu/drcbex64.cpp | 614 +- src/devices/cpu/drcbex86.cpp | 374 +- 246 files changed, 81793 insertions(+), 49908 deletions(-) create mode 100644 3rdparty/asmjit/configure.sh create mode 100644 3rdparty/asmjit/configure_sanitizers.sh create mode 100644 3rdparty/asmjit/configure_vs2022_x64.bat create mode 100644 3rdparty/asmjit/configure_vs2022_x86.bat delete mode 100644 3rdparty/asmjit/src/asmjit/arm/armoperand.h create mode 100644 3rdparty/asmjit/src/asmjit/core/arena.cpp create mode 100644 3rdparty/asmjit/src/asmjit/core/arena.h create mode 100644 3rdparty/asmjit/src/asmjit/core/arenabitset.cpp create mode 100644 3rdparty/asmjit/src/asmjit/core/arenabitset_p.h rename 3rdparty/asmjit/src/asmjit/core/{zonehash.cpp => arenahash.cpp} (82%) create mode 100644 3rdparty/asmjit/src/asmjit/core/arenahash.h rename 3rdparty/asmjit/src/asmjit/core/{zonelist.cpp => arenalist.cpp} (80%) create mode 100644 3rdparty/asmjit/src/asmjit/core/arenalist.h create mode 100644 3rdparty/asmjit/src/asmjit/core/arenapool.h rename 3rdparty/asmjit/src/asmjit/core/{zonestring.h => arenastring.h} (52%) rename 
3rdparty/asmjit/src/asmjit/core/{zonetree.cpp => arenatree.cpp} (53%) create mode 100644 3rdparty/asmjit/src/asmjit/core/arenatree.h create mode 100644 3rdparty/asmjit/src/asmjit/core/arenavector.cpp create mode 100644 3rdparty/asmjit/src/asmjit/core/arenavector.h create mode 100644 3rdparty/asmjit/src/asmjit/core/fixup.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/rabuilders_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/racfgblock_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/racfgbuilder_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/raconstraints_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/rainst_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/rareg_p.h create mode 100644 3rdparty/asmjit/src/asmjit/core/span.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zone.cpp delete mode 100644 3rdparty/asmjit/src/asmjit/core/zone.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonehash.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonelist.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonestack.cpp delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonestack.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonetree.h delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonevector.cpp delete mode 100644 3rdparty/asmjit/src/asmjit/core/zonevector.h create mode 100644 3rdparty/asmjit/src/asmjit/host.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/ujitbase.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/unicompiler.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/unicompiler_a64.cpp create mode 100644 3rdparty/asmjit/src/asmjit/ujit/unicompiler_utils_p.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/unicompiler_x86.cpp create mode 100644 3rdparty/asmjit/src/asmjit/ujit/unicondition.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/uniop.h create mode 100644 3rdparty/asmjit/src/asmjit/ujit/vecconsttable.cpp create mode 100644 
3rdparty/asmjit/src/asmjit/ujit/vecconsttable.h delete mode 100644 3rdparty/asmjit/test/asmjit_test_compiler_a64.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_compiler_x86.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_execute.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_instinfo.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_perf.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_perf.h delete mode 100644 3rdparty/asmjit/test/asmjit_test_perf_a64.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_perf_x86.cpp delete mode 100644 3rdparty/asmjit/test/asmjit_test_unit.cpp delete mode 100644 3rdparty/asmjit/test/asmjitutils.h delete mode 100644 3rdparty/asmjit/test/performancetimer.h create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_codegen.cpp create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_codegen.h create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_codegen_a64.cpp create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_codegen_x86.cpp create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_overhead.cpp create mode 100644 3rdparty/asmjit/testing/bench/asmjit_bench_regalloc.cpp create mode 100644 3rdparty/asmjit/testing/commons/asmjitutils.h rename 3rdparty/asmjit/{test => testing/commons}/cmdline.h (58%) create mode 100644 3rdparty/asmjit/testing/commons/performancetimer.h create mode 100644 3rdparty/asmjit/testing/commons/random.h rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_assembler.cpp (56%) rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_assembler.h (50%) rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_assembler_a64.cpp (98%) rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_assembler_x64.cpp (99%) rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_assembler_x86.cpp (98%) rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_compiler.cpp (63%) rename 3rdparty/asmjit/{test => 
testing/tests}/asmjit_test_compiler.h (70%) create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_compiler_a64.cpp create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_compiler_x86.cpp rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_emitters.cpp (59%) create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_environment.cpp create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_instinfo.cpp rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_misc.h (53%) create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_runner.cpp create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_unicompiler.cpp create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_unicompiler_avx2fma.cpp create mode 100644 3rdparty/asmjit/testing/tests/asmjit_test_unicompiler_sse2.cpp rename 3rdparty/asmjit/{test => testing/tests}/asmjit_test_x86_sections.cpp (56%) rename 3rdparty/asmjit/{test => testing/tests}/broken.cpp (90%) rename 3rdparty/asmjit/{test => testing/tests}/broken.h (98%) delete mode 100755 3rdparty/asmjit/tools/configure-makefiles.sh delete mode 100755 3rdparty/asmjit/tools/configure-ninja.sh delete mode 100755 3rdparty/asmjit/tools/configure-sanitizers.sh delete mode 100644 3rdparty/asmjit/tools/configure-vs2019-x64.bat delete mode 100644 3rdparty/asmjit/tools/configure-vs2019-x86.bat delete mode 100644 3rdparty/asmjit/tools/configure-vs2022-x64.bat delete mode 100644 3rdparty/asmjit/tools/configure-vs2022-x86.bat delete mode 100755 3rdparty/asmjit/tools/configure-xcode.sh diff --git a/3rdparty/asmjit/CMakeLists.txt b/3rdparty/asmjit/CMakeLists.txt index aa6df9d611147..064b25536a139 100644 --- a/3rdparty/asmjit/CMakeLists.txt +++ b/3rdparty/asmjit/CMakeLists.txt @@ -1,10 +1,18 @@ -cmake_minimum_required(VERSION 3.19 FATAL_ERROR) +# AsmJit +# ====== + +# To consume asmjit as a dependency, use asmjit::asmjit alias. 
+ +cmake_minimum_required(VERSION 3.24 FATAL_ERROR) # Don't create a project if it was already created by another CMakeLists.txt. This makes # it possible to support both add_subdirectory() and include() ways of using AsmJit as a # dependency. if (NOT CMAKE_PROJECT_NAME OR "${CMAKE_PROJECT_NAME}" STREQUAL "asmjit") - project(asmjit CXX) + project(asmjit + LANGUAGES CXX + DESCRIPTION "Low-latency machine code generation" + HOMEPAGE_URL "https://asmjit.com") endif() include(CheckCXXCompilerFlag) @@ -43,6 +51,14 @@ if (ASMJIT_EMBED AND NOT ASMJIT_STATIC) set(ASMJIT_STATIC TRUE) endif() +if (NOT DEFINED ASMJIT_NO_DEPRECATED) + set(ASMJIT_NO_DEPRECATED FALSE) +endif() + +if (NOT DEFINED ASMJIT_NO_ABI_NAMESPACE) + set(ASMJIT_NO_ABI_NAMESPACE FALSE) +endif() + # AsmJit - Configuration - Backend # ================================ @@ -61,10 +77,6 @@ endif() # AsmJit - Configuration - Features # ================================= -if (NOT DEFINED ASMJIT_NO_DEPRECATED) - set(ASMJIT_NO_DEPRECATED FALSE) -endif() - if (NOT DEFINED ASMJIT_NO_SHM_OPEN) set(ASMJIT_NO_SHM_OPEN FALSE) endif() @@ -101,6 +113,14 @@ if (NOT DEFINED ASMJIT_NO_COMPILER) endif() endif() +if (NOT DEFINED ASMJIT_NO_UJIT) + if (ASMJIT_NO_COMPILER) + set(ASMJIT_NO_UJIT TRUE) + else() + set(ASMJIT_NO_UJIT FALSE) + endif() +endif() + # AsmJit - Configuration - CMake Introspection # ============================================ @@ -117,14 +137,17 @@ set(ASMJIT_NO_AARCH64 "${ASMJIT_NO_AARCH64}" CACHE BOOL "Disable AAr set(ASMJIT_NO_FOREIGN "${ASMJIT_NO_FOREIGN}" CACHE BOOL "Disable all foreign architectures (enables only a target architecture)") set(ASMJIT_NO_DEPRECATED "${ASMJIT_NO_DEPRECATED}" CACHE BOOL "Disable deprecated API at build time") +set(ASMJIT_NO_ABI_NAMESPACE "${ASMJIT_NO_ABI_NAMESPACE}" CACHE BOOL "Disable the use of ABI namespace (inline namespace in {asmjit} adding ABI version)") + set(ASMJIT_NO_SHM_OPEN "${ASMJIT_NO_SHM_OPEN}" CACHE BOOL "Disable the use of shm_open() even on platforms where it's 
supported") set(ASMJIT_NO_JIT "${ASMJIT_NO_JIT}" CACHE BOOL "Disable VirtMem, JitAllocator, and JitRuntime at build time") set(ASMJIT_NO_TEXT "${ASMJIT_NO_TEXT}" CACHE BOOL "Disable textual representation of instructions, enums, cpu features, ...") set(ASMJIT_NO_LOGGING "${ASMJIT_NO_LOGGING}" CACHE BOOL "Disable logging features at build time") set(ASMJIT_NO_VALIDATION "${ASMJIT_NO_VALIDATION}" CACHE BOOL "Disable instruction validation API at build time") set(ASMJIT_NO_INTROSPECTION "${ASMJIT_NO_INTROSPECTION}" CACHE BOOL "Disable instruction introspection API at build time") -set(ASMJIT_NO_BUILDER "${ASMJIT_NO_BUILDER}" CACHE BOOL "Disable Builder emitter at build time") -set(ASMJIT_NO_COMPILER "${ASMJIT_NO_COMPILER}" CACHE BOOL "Disable Compiler emitter at build time") +set(ASMJIT_NO_BUILDER "${ASMJIT_NO_BUILDER}" CACHE BOOL "Disable Builder at build time") +set(ASMJIT_NO_COMPILER "${ASMJIT_NO_COMPILER}" CACHE BOOL "Disable Compiler at build time") +set(ASMJIT_NO_UJIT "${ASMJIT_NO_UJIT}" CACHE BOOL "Disable UniCompiler at build time") # AsmJit - Project # ================ @@ -179,7 +202,7 @@ function(asmjit_detect_sanitizers out) set(${out} "${_out_array}" PARENT_SCOPE) endfunction() -function(asmjit_add_target target target_type) +function(asmjit_addapp target target_type) set(single_val "") set(multi_val SOURCES LIBRARIES CFLAGS CFLAGS_DBG CFLAGS_REL) cmake_parse_arguments("X" "" "${single_val}" "${multi_val}" ${ARGN}) @@ -195,7 +218,7 @@ function(asmjit_add_target target target_type) DEFINE_SYMBOL "" CXX_VISIBILITY_PRESET hidden) target_compile_options(${target} PRIVATE ${X_CFLAGS} ${ASMJIT_SANITIZE_CFLAGS} $<$:${X_CFLAGS_DBG}> $<$>:${X_CFLAGS_REL}>) - target_compile_features(${target} PUBLIC cxx_std_11) + target_compile_features(${target} PUBLIC cxx_std_17) target_link_options(${target} PRIVATE ${ASMJIT_PRIVATE_LFLAGS}) target_link_libraries(${target} PRIVATE ${X_LIBRARIES}) @@ -212,38 +235,41 @@ set(ASMJIT_INCLUDE_DIR "${ASMJIT_INCLUDE_DIRS}") if (NOT 
ASMJIT_NO_CUSTOM_FLAGS) if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_COMPILER_FRONTEND_VARIANT}" STREQUAL "xMSVC") - list(APPEND ASMJIT_PRIVATE_CFLAGS - -MP # [+] Multi-Process Compilation. - -GF # [+] Eliminate duplicate strings. - -Zc:__cplusplus # [+] Conforming __cplusplus definition. - -Zc:inline # [+] Remove unreferenced COMDAT. - -Zc:strictStrings # [+] Strict const qualification of string literals. - -Zc:threadSafeInit- # [-] Thread-safe statics. - -W4) # [+] Warning level 4. - - list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG - -GS) # [+] Buffer security-check. - - list(APPEND ASMJIT_PRIVATE_CFLAGS_REL - -GS- # [-] Buffer security-check. - -O2 # [+] Favor speed over size. - -Oi) # [+] Generate intrinsic functions. - elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(GNU|Clang|AppleClang)$") - list(APPEND ASMJIT_PRIVATE_CFLAGS -Wall -Wextra -Wconversion) - list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-math-errno) - list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2) + list(APPEND ASMJIT_PRIVATE_CFLAGS -W4) # [+] Warning level 4. + + list(APPEND ASMJIT_PRIVATE_CFLAGS -MP) # [+] Multi-Process Compilation. + list(APPEND ASMJIT_PRIVATE_CFLAGS -GF) # [+] Eliminate duplicate strings. + list(APPEND ASMJIT_PRIVATE_CFLAGS -Zc:__cplusplus) # [+] Conforming __cplusplus definition. + list(APPEND ASMJIT_PRIVATE_CFLAGS -Zc:inline) # [+] Remove unreferenced COMDAT. + list(APPEND ASMJIT_PRIVATE_CFLAGS -Zc:strictStrings) # [+] Strict const qualification of string literals. + list(APPEND ASMJIT_PRIVATE_CFLAGS -Zc:threadSafeInit-) # [-] Thread-safe statics. + + list(APPEND ASMJIT_PRIVATE_CFLAGS_DBG -GS) # [+] Buffer security-check. + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -GS-) # [-] Buffer security-check. + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2) # [+] Favor speed over size. + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -Oi) # [+] Generate intrinsic functions. 
+ elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU|Clang") + list(APPEND ASMJIT_PRIVATE_CFLAGS -Wall -Wextra -Wconversion) # [+] Add baseline warnings that can be used safely even with system headers. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wdouble-promotion) # [+] Warn about double promotions. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wduplicated-cond) # [+] Warn about duplicate conditions. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wduplicated-branches) # [+] Warn about duplicate branches. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wlogical-op) # [+] Warn about suspicious uses of logical operators in expressions. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wlogical-not-parentheses) # [+] Warn about logical not used on the left hand side operand of a comparison. + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -Wrestrict) + + list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-math-errno) # [-] Disable math functions setting errno (performance reasons). + list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-threadsafe-statics) # [-] Don't add guards when initializing statics (we don't need it). + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2) # [+] Compiling with -O2 in release mode is what we generally want. + list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -fmerge-all-constants) # [+] We don't need unique address per constant (merging improves library size). # -fno-semantic-interposition is not available on apple - the compiler issues a warning, which is not detected. - if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") - asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -fno-threadsafe-statics) - else() - asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -fno-threadsafe-statics -fno-semantic-interposition) + if (NOT APPLE) + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -fno-semantic-interposition) endif() - # The following flags can save few bytes in the resulting binary. - asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL - -fmerge-all-constants # Merge all constants even if it violates ISO C++. 
- -fno-enforce-eh-specs) # Don't enforce termination if noexcept function throws. + if (NOT "${CMAKE_SYSTEM_NAME}" STREQUAL "iOS") + asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS_REL -fno-enforce-eh-specs) # [-] Don't enforce termination if noexcept function throws. + endif() endif() endif() @@ -333,18 +359,21 @@ endif() foreach(build_option # AsmJit build options. ASMJIT_STATIC ASMJIT_NO_DEPRECATED + ASMJIT_NO_ABI_NAMESPACE # AsmJit backends selection. ASMJIT_NO_X86 ASMJIT_NO_AARCH64 ASMJIT_NO_FOREIGN # AsmJit features selection. + ASMJIT_NO_SHM_OPEN ASMJIT_NO_JIT ASMJIT_NO_TEXT ASMJIT_NO_LOGGING ASMJIT_NO_INTROSPECTION ASMJIT_NO_VALIDATION ASMJIT_NO_BUILDER - ASMJIT_NO_COMPILER) + ASMJIT_NO_COMPILER + ASMJIT_NO_UJIT) if (${build_option}) List(APPEND ASMJIT_CFLAGS "-D${build_option}") List(APPEND ASMJIT_PRIVATE_CFLAGS "-D${build_option}") @@ -371,6 +400,20 @@ set(ASMJIT_SRC_LIST asmjit/core.h asmjit/core/api-build_p.h asmjit/core/api-config.h + asmjit/core/arena.cpp + asmjit/core/arena.h + asmjit/core/arenabitset.cpp + asmjit/core/arenabitset_p.h + asmjit/core/arenahash.cpp + asmjit/core/arenahash.h + asmjit/core/arenalist.cpp + asmjit/core/arenalist.h + asmjit/core/arenapool.h + asmjit/core/arenastring.h + asmjit/core/arenatree.cpp + asmjit/core/arenatree.h + asmjit/core/arenavector.cpp + asmjit/core/arenavector.h asmjit/core/archtraits.cpp asmjit/core/archtraits.h asmjit/core/archcommons.h @@ -400,6 +443,7 @@ set(ASMJIT_SRC_LIST asmjit/core/environment.h asmjit/core/errorhandler.cpp asmjit/core/errorhandler.h + asmjit/core/fixup.h asmjit/core/formatter.cpp asmjit/core/formatter.h asmjit/core/func.cpp @@ -425,14 +469,18 @@ set(ASMJIT_SRC_LIST asmjit/core/osutils.h asmjit/core/osutils_p.h asmjit/core/raassignment_p.h - asmjit/core/rabuilders_p.h + asmjit/core/racfgblock_p.h + asmjit/core/racfgbuilder_p.h + asmjit/core/raconstraints_p.h asmjit/core/radefs_p.h + asmjit/core/rainst_p.h asmjit/core/ralocal.cpp asmjit/core/ralocal_p.h asmjit/core/rapass.cpp 
asmjit/core/rapass_p.h asmjit/core/rastack.cpp asmjit/core/rastack_p.h + asmjit/core/span.h asmjit/core/string.cpp asmjit/core/string.h asmjit/core/support.cpp @@ -443,26 +491,12 @@ set(ASMJIT_SRC_LIST asmjit/core/type.h asmjit/core/virtmem.cpp asmjit/core/virtmem.h - asmjit/core/zone.cpp - asmjit/core/zone.h - asmjit/core/zonehash.cpp - asmjit/core/zonehash.h - asmjit/core/zonelist.cpp - asmjit/core/zonelist.h - asmjit/core/zonestack.cpp - asmjit/core/zonestack.h - asmjit/core/zonestring.h - asmjit/core/zonetree.cpp - asmjit/core/zonetree.h - asmjit/core/zonevector.cpp - asmjit/core/zonevector.h asmjit/a64.h asmjit/arm.h asmjit/arm/armformatter.cpp asmjit/arm/armformatter_p.h asmjit/arm/armglobals.h - asmjit/arm/armoperand.h asmjit/arm/armutils.h asmjit/arm/a64archtraits_p.h asmjit/arm/a64assembler.cpp @@ -513,6 +547,15 @@ set(ASMJIT_SRC_LIST asmjit/x86/x86operand.h asmjit/x86/x86rapass.cpp asmjit/x86/x86rapass_p.h + + asmjit/ujit/ujitbase.h + asmjit/ujit/unicompiler.h + asmjit/ujit/unicompiler_a64.cpp + asmjit/ujit/unicompiler_x86.cpp + asmjit/ujit/unicompiler_utils_p.h + asmjit/ujit/uniop.h + asmjit/ujit/vecconsttable.cpp + asmjit/ujit/vecconsttable.h ) if (MSVC AND NOT ASMJIT_NO_NATVIS) @@ -552,12 +595,12 @@ message(" ASMJIT_PRIVATE_CFLAGS_REL=${ASMJIT_PRIVATE_CFLAGS_REL}") if (NOT ASMJIT_EMBED) # Add AsmJit target. 
- asmjit_add_target(asmjit "${ASMJIT_TARGET_TYPE}" - SOURCES ${ASMJIT_SRC} - LIBRARIES ${ASMJIT_DEPS} - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + asmjit_addapp(asmjit "${ASMJIT_TARGET_TYPE}" + SOURCES ${ASMJIT_SRC} + LIBRARIES ${ASMJIT_DEPS} + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) target_compile_options(asmjit INTERFACE ${ASMJIT_CFLAGS}) target_include_directories(asmjit BEFORE INTERFACE @@ -592,58 +635,46 @@ if (NOT ASMJIT_EMBED) enable_testing() # Special target that always uses embedded AsmJit. - asmjit_add_target(asmjit_test_unit TEST - SOURCES ${ASMJIT_SRC} - test/asmjit_test_unit.cpp - test/broken.cpp - test/broken.h - LIBRARIES ${ASMJIT_DEPS} - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - -DASMJIT_TEST - -DASMJIT_STATIC - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) - target_include_directories(asmjit_test_unit BEFORE PRIVATE ${ASMJIT_INCLUDE_DIRS}) - - asmjit_add_target(asmjit_test_assembler TEST - SOURCES test/asmjit_test_assembler.cpp - test/asmjit_test_assembler.h - test/asmjit_test_assembler_a64.cpp - test/asmjit_test_assembler_x64.cpp - test/asmjit_test_assembler_x86.cpp - LIBRARIES asmjit::asmjit - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) - - asmjit_add_target(asmjit_test_perf EXECUTABLE - SOURCES test/asmjit_test_perf.cpp - test/asmjit_test_perf_a64.cpp - test/asmjit_test_perf_x86.cpp - SOURCES test/asmjit_test_perf.h - LIBRARIES asmjit::asmjit - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) - - foreach(_target asmjit_test_emitters - asmjit_test_execute - asmjit_test_x86_sections) - asmjit_add_target(${_target} TEST - SOURCES test/${_target}.cpp - LIBRARIES asmjit::asmjit - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - CFLAGS_DBG 
${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + asmjit_addapp(asmjit_test_runner TEST + SOURCES ${ASMJIT_SRC} + testing/tests/asmjit_test_runner.cpp + testing/tests/broken.cpp + testing/tests/broken.h + LIBRARIES ${ASMJIT_DEPS} + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + -DASMJIT_TEST + -DASMJIT_STATIC + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + target_include_directories(asmjit_test_runner BEFORE PRIVATE ${ASMJIT_INCLUDE_DIRS}) + + asmjit_addapp(asmjit_test_assembler TEST + SOURCES testing/tests/asmjit_test_assembler.cpp + testing/tests/asmjit_test_assembler.h + testing/tests/asmjit_test_assembler_a64.cpp + testing/tests/asmjit_test_assembler_x64.cpp + testing/tests/asmjit_test_assembler_x86.cpp + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + + foreach(app asmjit_test_environment asmjit_test_emitters asmjit_test_x86_sections) + asmjit_addapp(${app} TEST + SOURCES testing/tests/${app}.cpp + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) endforeach() if (NOT ASMJIT_NO_INTROSPECTION) - asmjit_add_target(asmjit_test_instinfo TEST - SOURCES test/asmjit_test_instinfo.cpp - LIBRARIES asmjit::asmjit - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + asmjit_addapp(asmjit_test_instinfo TEST + SOURCES testing/tests/asmjit_test_instinfo.cpp + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) endif() if (NOT (ASMJIT_NO_BUILDER OR ASMJIT_NO_COMPILER)) @@ -654,34 +685,79 @@ if (NOT ASMJIT_EMBED) set(ASMJIT_SSE2_CFLAGS "") check_cxx_source_compiles(" - #if defined(_M_IX86) || defined(__X86__) || defined(__i386__) - int target_is_32_bit_x86() { return 1; } - #else - 
// Compile error... + #if defined(_M_X64) || defined(__x86_64__) + // Skip... + #elif defined(_M_IX86) || defined(__X86__) || defined(__i386__) + int target_arch_is_x86() { return 1; } #endif + int main() { return target_arch_is_x86(); } + " ASMJIT_TARGET_ARCH_X86) - int main() { - return target_is_32_bit_x86(); - } - " ASMJIT_TARGET_IS_32_BIT_X86) + check_cxx_source_compiles(" + #if defined(_M_X64) || defined(__x86_64__) + int target_arch_is_x86_64() { return 1; } + #endif + int main() { return target_arch_is_x86_64(); } + " ASMJIT_TARGET_ARCH_X86_64) - if (ASMJIT_TARGET_IS_32_BIT_X86) - if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_COMPILER_FRONTEND_VARIANT}" STREQUAL "xMSVC") - asmjit_detect_cflags(ASMJIT_SSE2_CFLAGS "-arch:SSE2") - else() - asmjit_detect_cflags(ASMJIT_SSE2_CFLAGS "-msse2") + if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC" OR "x${CMAKE_CXX_COMPILER_FRONTEND_VARIANT}" STREQUAL "xMSVC") + if (ASMJIT_TARGET_ARCH_X86) + asmjit_detect_cflags(ASMJIT_SSE2_CFLAGS -arch:SSE2) + endif() + if (ASMJIT_TARGET_ARCH_X86 OR ASMJIT_TARGET_ARCH_X86_64) + asmjit_detect_cflags(ASMJIT_AVX2FMA_CFLAGS -arch:AVX2) + endif() + else() + if (ASMJIT_TARGET_ARCH_X86) + asmjit_detect_cflags(ASMJIT_SSE2_CFLAGS -msse2) + endif() + if (ASMJIT_TARGET_ARCH_X86 OR ASMJIT_TARGET_ARCH_X86_64) + asmjit_detect_cflags(ASMJIT_AVX2FMA_CFLAGS -mavx2 -mfma) endif() endif() - asmjit_add_target(asmjit_test_compiler TEST - SOURCES test/asmjit_test_compiler.cpp - test/asmjit_test_compiler.h - test/asmjit_test_compiler_a64.cpp - test/asmjit_test_compiler_x86.cpp - LIBRARIES asmjit::asmjit - CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_SSE2_CFLAGS} - CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} - CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + + asmjit_addapp(asmjit_test_compiler TEST + SOURCES testing/tests/asmjit_test_compiler.cpp + testing/tests/asmjit_test_compiler.h + testing/tests/asmjit_test_compiler_a64.cpp + testing/tests/asmjit_test_compiler_x86.cpp + LIBRARIES asmjit::asmjit + 
CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_SSE2_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) endif() + if (NOT ASMJIT_NO_UJIT) + asmjit_addapp(asmjit_test_unicompiler TEST + SOURCES testing/tests/asmjit_test_unicompiler.cpp + testing/tests/asmjit_test_unicompiler_sse2.cpp + testing/tests/asmjit_test_unicompiler_avx2fma.cpp + testing/tests/broken.cpp + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_SSE2_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + set_property(SOURCE testing/tests/asmjit_test_unicompiler_avx2fma.cpp APPEND PROPERTY COMPILE_OPTIONS ${ASMJIT_AVX2FMA_CFLAGS}) + endif() + + asmjit_addapp(asmjit_bench_codegen EXECUTABLE + SOURCES testing/bench/asmjit_bench_codegen.cpp + testing/bench/asmjit_bench_codegen_a64.cpp + testing/bench/asmjit_bench_codegen_x86.cpp + SOURCES testing/bench/asmjit_bench_codegen.h + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + + foreach(app asmjit_bench_overhead asmjit_bench_regalloc) + asmjit_addapp(${app} TEST + SOURCES testing/bench/${app}.cpp + LIBRARIES asmjit::asmjit + CFLAGS ${ASMJIT_PRIVATE_CFLAGS} + CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG} + CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL}) + endforeach() + endif() endif() diff --git a/3rdparty/asmjit/LICENSE.md b/3rdparty/asmjit/LICENSE.md index d87dbf9bca28e..7818b21e52a8e 100644 --- a/3rdparty/asmjit/LICENSE.md +++ b/3rdparty/asmjit/LICENSE.md @@ -1,4 +1,4 @@ -Copyright (c) 2008-2024 The AsmJit Authors +Copyright (c) 2008-2025 Petr Kobalicek This software is provided 'as-is', without any express or implied warranty. 
In no event will the authors be held liable for any damages diff --git a/3rdparty/asmjit/README.md b/3rdparty/asmjit/README.md index ee0c7200ed3a1..fb03e75e3a1dc 100644 --- a/3rdparty/asmjit/README.md +++ b/3rdparty/asmjit/README.md @@ -1,7 +1,7 @@ AsmJit ------ -AsmJit is a lightweight library for machine code generation written in C++ language. +AsmJit is a library for low-latency machine code generation written in C++. * [Official Home Page (asmjit.com)](https://asmjit.com) * [Official Repository (asmjit/asmjit)](https://github.com/asmjit/asmjit) @@ -10,17 +10,44 @@ AsmJit is a lightweight library for machine code generation written in C++ langu See [asmjit.com](https://asmjit.com) page for more details, examples, and documentation. -Documentation -------------- +Project Organization +-------------------- - * [Documentation Index](https://asmjit.com/doc/index.html) - * [Build Instructions](https://asmjit.com/doc/group__asmjit__build.html) + * **`/`** - Project root + * **src** - Source code + * **asmjit** - Source code and headers (always point include path in here) + * **core** - Core API, backend independent except relocations + * **arm** - ARM specific API, designed to be common for both AArch32 and AArch64 + * **a64** - AArch64 specific API, used only by AArch64 backends + * **x86** - X86 specific API, used only by X86 and X64 backends + * **ujit** - Universal JIT API + * **testing** - Unit tests, integration tests, and benchmarks (don't embed in your project) + * **commons** - Common utilities shared between tests and benchmarks + * **bench** - Benchmarks + * **tests** - Unit tests and integration tests + * **tools** - Tools used to re-regenerate generated files (instruction DB, enum strings) + +Roadmap +------- + + * See [Roadmap](https://asmjit.com/roadmap.html) page for more details Contributing ------------ * See [CONTRIBUTING](./CONTRIBUTING.md) page for more details +Documentation +------------- + + * [Documentation 
Index](https://asmjit.com/doc/index.html) + * [Build Instructions](https://asmjit.com/doc/group__asmjit__build.html) (includes [CMake Integration](https://asmjit.com/doc/group__asmjit__build.html#cmake_integration)) + +Development & Testing +--------------------- + + * Basic configure scripts that invoke cmake are provided in project root. + Breaking Changes ---------------- @@ -28,43 +55,24 @@ Breaking the API is sometimes inevitable, what to do? * See [Breaking Changes Guide](https://asmjit.com/doc/group__asmjit__breaking__changes.html), which is now part of AsmJit documentation * See asmjit tests, they always compile and provide implementation of many use-cases: - * [asmjit_test_emitters.cpp](./test/asmjit_test_emitters.cpp) - Tests that demonstrate the purpose of emitters - * [asmjit_test_assembler_x86.cpp](./test/asmjit_test_assembler_x86.cpp) - Tests targeting AsmJit's Assembler (x86/x64) - * [asmjit_test_compiler_x86.cpp](./test/asmjit_test_compiler_x86.cpp) - Tests targeting AsmJit's Compiler (x86/x64) - * [asmjit_test_instinfo.cpp](./test/asmjit_test_instinfo.cpp) - Tests that query instruction information - * [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) - Multiple sections test. 
+ * [asmjit_test_emitters.cpp](./testing/tests/asmjit_test_emitters.cpp) - Tests that demonstrate the purpose of emitters + * [asmjit_test_assembler_x86.cpp](./testing/tests/asmjit_test_assembler_x86.cpp) - Tests targeting AsmJit's Assembler (x86/x64) + * [asmjit_test_compiler_x86.cpp](./testing/tests/asmjit_test_compiler_x86.cpp) - Tests targeting AsmJit's Compiler (x86/x64) + * [asmjit_test_instinfo.cpp](./testing/tests/asmjit_test_instinfo.cpp) - Tests that query instruction information + * [asmjit_test_x86_sections.cpp](./testing/tests/asmjit_test_x86_sections.cpp) - Multiple sections test * Visit our [Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help -Project Organization --------------------- - - * **`/`** - Project root - * **src** - Source code - * **asmjit** - Source code and headers (always point include path in here) - * **core** - Core API, backend independent except relocations - * **arm** - ARM specific API, used only by ARM and AArch64 backends - * **x86** - X86 specific API, used only by X86 and X64 backends - * **test** - Unit and integration tests (don't embed in your project) - * **tools** - Tools used for configuring, documenting, and generating files - -Ports ------ - - * [ ] 32-bit ARM/Thumb port (work in progress) - * [ ] RISC-V port (not in progress, help welcome) - Support ------- * AsmJit project has both community and commercial support, see [AsmJit's Support Page](https://asmjit.com/support.html) - * You can help the development and maintenance through Petr Kobalicek's [GitHub sponsors Profile](https://github.com/sponsors/kobalicek) + * Organizations that rely on AsmJit should support the development! 
Notable Donors List: * [ZehMatt](https://github.com/ZehMatt) - Authors & Maintainers --------------------- - * Petr Kobalicek + * Petr Kobalicek ([website](https://kobalicek.com)) diff --git a/3rdparty/asmjit/configure.sh b/3rdparty/asmjit/configure.sh new file mode 100644 index 0000000000000..659d6dc8c72e3 --- /dev/null +++ b/3rdparty/asmjit/configure.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +BUILD_OPTIONS="-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DASMJIT_TEST=1" + +echo "== [Configuring Build - Debug] ==" +eval cmake . -B build/Debug -DCMAKE_BUILD_TYPE=Debug ${BUILD_OPTIONS} "$@" +echo "" + +echo "== [Configuring Build - Release] ==" +eval cmake . -B build/Release -DCMAKE_BUILD_TYPE=Release ${BUILD_OPTIONS} "$@" +echo "" diff --git a/3rdparty/asmjit/configure_sanitizers.sh b/3rdparty/asmjit/configure_sanitizers.sh new file mode 100644 index 0000000000000..2634bcdf57ed7 --- /dev/null +++ b/3rdparty/asmjit/configure_sanitizers.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +BUILD_OPTIONS="-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DASMJIT_TEST=1" + +echo "== [Configuring Build - Release_ASAN] ==" +eval cmake . -B build/Release_ASAN ${BUILD_OPTIONS} -DCMAKE_BUILD_TYPE=Release -DASMJIT_SANITIZE=address "$@" +echo "" + +echo "== [Configuring Build - Release_MSAN] ==" +eval cmake . -B build/Release_MSAN ${BUILD_OPTIONS} -DCMAKE_BUILD_TYPE=Release -DASMJIT_SANITIZE=memory "$@" +echo "" + +echo "== [Configuring Build - Release_UBSAN] ==" +eval cmake . -B build/Release_UBSAN ${BUILD_OPTIONS} -DCMAKE_BUILD_TYPE=Release -DASMJIT_SANITIZE=undefined "$@" +echo "" diff --git a/3rdparty/asmjit/configure_vs2022_x64.bat b/3rdparty/asmjit/configure_vs2022_x64.bat new file mode 100644 index 0000000000000..9bbadd7a3fd23 --- /dev/null +++ b/3rdparty/asmjit/configure_vs2022_x64.bat @@ -0,0 +1,2 @@ +@echo off +cmake . 
-B build_x64 -G"Visual Studio 17" -A x64 -DASMJIT_TEST=ON diff --git a/3rdparty/asmjit/configure_vs2022_x86.bat b/3rdparty/asmjit/configure_vs2022_x86.bat new file mode 100644 index 0000000000000..8c123a0a2816e --- /dev/null +++ b/3rdparty/asmjit/configure_vs2022_x86.bat @@ -0,0 +1,2 @@ +@echo off +cmake . -B build_x86 -G"Visual Studio 17" -A Win32 -DASMJIT_TEST=ON diff --git a/3rdparty/asmjit/src/asmjit.natvis b/3rdparty/asmjit/src/asmjit.natvis index 68012e0d15aa1..c107c7f7aba29 100644 --- a/3rdparty/asmjit/src/asmjit.natvis +++ b/3rdparty/asmjit/src/asmjit.natvis @@ -22,7 +22,7 @@ - + {{ [size={_size, d} capacity={_capacity, d}] }} _size, d @@ -35,10 +35,10 @@ - + - - + + @@ -48,36 +48,36 @@ - [None] - [Reg] {{ type={regType()} group={regGroup()} size={opSize(), d} }} - [Mem] {{ base={memBaseType()} index={memIndexType()} }} - [Imm] {{ type={immType()} }} - [Label] - [Unknown] + [None] + [Reg] {{ type={reg_type()} group={reg_group()} size={opSize(), d} }} + [Mem] {{ base={memBaseType()} index={memIndexType()} }} + [Imm] {{ type={immType()} }} + [Label] + [Unknown] _bits, X - opType() - regType() - regGroup() - opSize(), d - memBaseType() - memIndexType() - memRegHome() - opSize(), d - memX86Segment() - memX86AddrType() - memX86ShiftValue() - memX86Broadcast() - immType() + op_type() + reg_type() + reg_group() + opSize(), d + memBaseType() + memIndexType() + memRegHome() + opSize(), d + memX86Segment() + memX86AddrType() + memX86ShiftValue() + memX86Broadcast() + immType() - + - + - - + + @@ -85,161 +85,161 @@ - + - + - [None] - [Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }} - [Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }} - [Imm] {{ val={immValue(), d} hex={immValue(), X} }} - [Label] {{ id={_baseId} }} - [Unknown] + [None] + [Reg] {{ id={_base_id, d} group={reg_group(), d} type={reg_type(), d} size={opSize(), d} }} + [Mem] {{ base_id={memBaseId(), d} 
index_id={memIndexId(), d} offset={(__int64)memOffset(), d} }} + [Imm] {{ val={immValue(), d} hex={immValue(), X} }} + [Label] {{ id={_base_id} }} + [Unknown] _signature._bits, X - opType() + op_type() opSize(), d - regType() - regGroup() - _baseId, d - memBaseType() - memBaseId() - memIndexType() - memIndexId() - memRegHome() - memOffset(), d - memX86Segment() - memX86AddrType() - memX86ShiftValue() - memX86Broadcast() - immType() - immValue(), X - _baseId, d - _baseId + reg_type() + reg_group() + _base_id, d + memBaseType() + memBaseId() + memIndexType() + memIndexId() + memRegHome() + memOffset(), d + memX86Segment() + memX86AddrType() + memX86ShiftValue() + memX86Broadcast() + immType() + immValue(), X + _base_id, d + _base_id _data[0] _data[1] - + - + - - - - + + + + - [RegValue {{ regType={regType()} indirect={isIndirect()} done={isDone()} }}] - [StackValue {{ indirect={isIndirect()} done={isDone()} }}] - [Unknown] + [RegValue {{ reg_type={reg_type()} indirect={isIndirect()} done={is_done()} }}] + [StackValue {{ indirect={isIndirect()} done={is_done()} }}] + [Unknown] _data - (asmjit::TypeId)(typeId()) - (asmjit::BaseReg::RegType)regType() - regId() - stackOffset() + (asmjit::TypeId)(type_id()) + (asmjit::RegType)reg_type() + reg_id() + stack_offset() - - - - - - - - - - - - + + + + + + + + + + + + - - - - - - - - [InstNode] - [SectionNode] - [LabelNode] - [AlignNode] - [EmbedDataNode] - [EmbedLabelNode] - [EmbedLabelDeltaNode] - [ConstPoolNode] - [CommentNode] - [SentinelNode] + + + + + + + + [InstNode] + [SectionNode] + [LabelNode] + [AlignNode] + [EmbedDataNode] + [EmbedLabelNode] + [EmbedLabelDeltaNode] + [ConstPoolNode] + [CommentNode] + [SentinelNode] [JumpNode] - [FuncNode] - [FuncRetNode] - [InvokeNode] + [FuncNode] + [FuncRetNode] + [InvokeNode] [UnknownNode {nodeType(), d}] _prev _next - _any._nodeType - _any._nodeFlags + _node_type + _node_flags _position - _userDataU64 - _userDataPtr - _passData - _inlineComment, s8 + _userDataU64 + _userDataPtr + 
_passData + _inline_comment, s8 ((asmjit::InstNode*)this)->_baseInst - _inst._opCount - _inst._opCapacity - ((asmjit::InstNode*)this)->_opArray, [_inst._opCount] + _inst._op_count + _inst._op_capacity + ((asmjit::InstNode*)this)->_opArray, [_inst._op_count] - ((asmjit::SectionNode*)this)->_id - ((asmjit::SectionNode*)this)->_nextSection + ((asmjit::SectionNode*)this)->_section_id + ((asmjit::SectionNode*)this)->_nextSection - ((asmjit::LabelNode*)this)->_labelId + ((asmjit::LabelNode*)this)->_label_id - ((asmjit::AlignNode*)this)->_alignData._alignMode - ((asmjit::AlignNode*)this)->_alignment + ((asmjit::AlignNode*)this)->_align_data._align_mode + ((asmjit::AlignNode*)this)->_alignment - _embed._typeId, d - _embed._typeSize, d - ((asmjit::EmbedDataNode*)this)->_itemCount - ((asmjit::EmbedDataNode*)this)->_repeatCount - ((asmjit::EmbedDataNode*)this)->_inlineData - ((asmjit::EmbedDataNode*)this)->_externalData + _embed._type_id, d + _embed._type_size, d + ((asmjit::EmbedDataNode*)this)->_item_count + ((asmjit::EmbedDataNode*)this)->_repeat_count + ((asmjit::EmbedDataNode*)this)->_inlineData + ((asmjit::EmbedDataNode*)this)->_externalData - ((asmjit::EmbedLabelNode*)this)->_labelId + ((asmjit::EmbedLabelNode*)this)->_label_id - ((asmjit::EmbedLabelDeltaNode*)this)->_labelId - ((asmjit::EmbedLabelDeltaNode*)this)->_baseLabelId - ((asmjit::EmbedLabelDeltaNode*)this)->_dataSize + ((asmjit::EmbedLabelDeltaNode*)this)->_label_id + ((asmjit::EmbedLabelDeltaNode*)this)->_base_label_id + ((asmjit::EmbedLabelDeltaNode*)this)->_data_size - ((asmjit::ConstPoolNode*)this)->_constPool + ((asmjit::ConstPoolNode*)this)->_const_pool - _sentinel._sentinelType + _sentinel._sentinel_type ((asmjit::JumpNode*)this)->_annotation - ((asmjit::FuncNode*)this)->_funcDetail - ((asmjit::FuncNode*)this)->_frame - ((asmjit::FuncNode*)this)->_exitNode - ((asmjit::FuncNode*)this)->_end - ((asmjit::FuncNode*)this)->_args, [((asmjit::FuncNode*)this)->_funcDetail._argCount] + 
((asmjit::FuncNode*)this)->_func_detail + ((asmjit::FuncNode*)this)->_frame + ((asmjit::FuncNode*)this)->_exit_node + ((asmjit::FuncNode*)this)->_end + ((asmjit::FuncNode*)this)->_args, [((asmjit::FuncNode*)this)->_func_detail._arg_count] - ((asmjit::InvokeNode*)this)->_funcDetail - ((asmjit::InvokeNode*)this)->_rets - ((asmjit::InvokeNode*)this)->_args, [((asmjit::InvokeNode*)this)->_funcDetail._argCount] + ((asmjit::InvokeNode*)this)->_func_detail + ((asmjit::InvokeNode*)this)->_rets + ((asmjit::InvokeNode*)this)->_args, [((asmjit::InvokeNode*)this)->_func_detail._arg_count] diff --git a/3rdparty/asmjit/src/asmjit/a64.h b/3rdparty/asmjit/src/asmjit/a64.h index e849eb3a8ca30..a2fd47b3fb51f 100644 --- a/3rdparty/asmjit/src/asmjit/a64.h +++ b/3rdparty/asmjit/src/asmjit/a64.h @@ -1,6 +1,6 @@ // This file is part of AsmJit project // -// See asmjit.h or LICENSE.md for license and copyright information +// See or LICENSE.md for license and copyright information // SPDX-License-Identifier: Zlib #ifndef ASMJIT_A64_H_INCLUDED @@ -26,21 +26,13 @@ //! //! ### Register Operands //! -//! - \ref arm::Reg - Base class of all AArch32/AArch64 registers. -//! - \ref a64::Gp - General purpose register (AArch64): -//! - \ref a64::GpW - 32-bit general purpose register (AArch64). -//! - \ref a64::GpX - 64-bit general purpose register (AArch64). -//! - \ref a64::Vec - Vector (SIMD) register: -//! - \ref a64::VecB - 8-bit SIMD register. -//! - \ref a64::VecH - 16-bit SIMD register. -//! - \ref a64::VecS - 32-bit SIMD register. -//! - \ref a64::VecD - 64-bit SIMD register. -//! - \ref a64::VecV - 128-bit SIMD register. +//! - \ref a64::Gp - General purpose register (abstracts 32-bit and 64-bit general purpose registers). +//! - \ref a64::Vec - Vector register (abstracts B, H, S, D, and Q NEON register with possible element type and index). //! //! ### Memory Operands //! -//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features -//! 
including base, index, pre/post increment, and ARM-specific shift addressing and index extending. +//! - \ref a64::Mem - AArch64 memory operand that provides support for all ARM addressing features including base, +//! index, pre/post increment, and ARM-specific shift addressing + index extending. //! //! ### Other //! @@ -48,13 +40,16 @@ //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64. #include "./arm.h" -#include "./arm/a64assembler.h" -#include "./arm/a64builder.h" -#include "./arm/a64compiler.h" -#include "./arm/a64emitter.h" -#include "./arm/a64globals.h" -#include "./arm/a64instdb.h" -#include "./arm/a64operand.h" + +#include "asmjit-scope-begin.h" +#include "arm/a64assembler.h" +#include "arm/a64builder.h" +#include "arm/a64compiler.h" +#include "arm/a64emitter.h" +#include "arm/a64globals.h" +#include "arm/a64instdb.h" +#include "arm/a64operand.h" +#include "asmjit-scope-end.h" #endif // ASMJIT_A64_H_INCLUDED diff --git a/3rdparty/asmjit/src/asmjit/arm.h b/3rdparty/asmjit/src/asmjit/arm.h index e4a3e3d3ac552..a4cd5fa28148d 100644 --- a/3rdparty/asmjit/src/asmjit/arm.h +++ b/3rdparty/asmjit/src/asmjit/arm.h @@ -1,6 +1,6 @@ // This file is part of AsmJit project // -// See asmjit.h or LICENSE.md for license and copyright information +// See or LICENSE.md for license and copyright information // SPDX-License-Identifier: Zlib #ifndef ASMJIT_ARM_H_INCLUDED @@ -11,20 +11,13 @@ //! ### Namespaces //! //! - \ref arm - arm namespace provides common functionality for both AArch32 and AArch64 backends. -//! - \ref a32 - a32 namespace provides support for AArch32 architecture. In addition it includes //! \ref arm namespace, so you can only use a single namespace when targeting AArch32 architecture. //! - \ref a64 - a64 namespace provides support for AArch64 architecture. In addition it includes //! \ref arm namespace, so you can only use a single namespace when targeting AArch64 architecture. //! //! ### Emitters //! -//! 
- AArch32 -//! - \ref a32::Assembler - AArch32 assembler (must read, provides examples). -//! - \ref a32::Builder - AArch32 builder. -//! - \ref a32::Compiler - AArch32 compiler. -//! - \ref a32::Emitter - AArch32 emitter (abstract). -//! -//! - AArch64 +//! - AArch64: //! - \ref a64::Assembler - AArch64 assembler (must read, provides examples). //! - \ref a64::Builder - AArch64 builder. //! - \ref a64::Compiler - AArch64 compiler. @@ -32,13 +25,6 @@ //! //! ### Supported Instructions //! -//! - AArch32: -//! - Emitters: -//! - \ref a32::EmitterExplicitT - Provides all instructions that use explicit operands, provides also -//! utility functions. The member functions provided are part of all AArch32 emitters. -//! - Instruction representation: -//! - \ref a32::Inst::Id - instruction identifiers. -//! //! - AArch64: //! - Emitters: //! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands, provides also @@ -46,39 +32,26 @@ //! - Instruction representation: //! - \ref a64::Inst::Id - instruction identifiers. //! -//! ### Register Operands +//! ### ARM Operands //! -//! - \ref arm::Reg - Base class of all AArch32/AArch64 registers. -//! - \ref a32::Gp - 32-bit general purpose register used by AArch32: +//! - AArch64: //! - \ref a64::Gp - 32-bit or 64-bit general purpose register used by AArch64: -//! - \ref a64::GpW - 32-bit register (AArch64). -//! - \ref a64::GpX - 64-bit register (AArch64). -//! - \ref arm::BaseVec - Base vector (SIMD) register. -//! - \ref a32::Vec - Vector (SIMD) register (AArch32): -//! - \ref a32::VecS - 32-bit SIMD register (AArch32). -//! - \ref a32::VecD - 64-bit SIMD register (AArch32). -//! - \ref a32::VecV - 128-bit SIMD register (AArch32). -//! - \ref a64::Vec - Vector (SIMD) register (AArch64): -//! - \ref a64::VecB - 8-bit SIMD register (AArch64). -//! - \ref a64::VecH - 16-bit SIMD register (AArch64). -//! - \ref a64::VecS - 32-bit SIMD register (AArch64). -//! 
- \ref a64::VecD - 64-bit SIMD register (AArch64). -//! - \ref a64::VecV - 128-bit SIMD register (AArch64). +//! - \ref a64::Vec - Vector (SIMD) register. +//! - \ref a64::Mem - AArch64 memory operand that provides support for all AArch64 addressing features +//! including base, index, pre/post increment, and AArch64 specific shift/extend of memory index. //! //! ### Memory Operands //! -//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features -//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending. -//! //! ### Other //! //! - \ref arm::Shift - Shift operation and value (both AArch32 and AArch64). -//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode. //! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64. -#include "./core.h" -#include "./arm/armglobals.h" -#include "./arm/armoperand.h" -#include "./arm/armutils.h" +#include "core.h" + +#include "asmjit-scope-begin.h" +#include "arm/armglobals.h" +#include "arm/armutils.h" +#include "asmjit-scope-end.h" #endif // ASMJIT_ARM_H_INCLUDED diff --git a/3rdparty/asmjit/src/asmjit/arm/a64archtraits_p.h b/3rdparty/asmjit/src/asmjit/arm/a64archtraits_p.h index 4b5bde68a9ed0..9b279e0fcee7d 100644 --- a/3rdparty/asmjit/src/asmjit/arm/a64archtraits_p.h +++ b/3rdparty/asmjit/src/asmjit/arm/a64archtraits_p.h @@ -1,6 +1,6 @@ // This file is part of AsmJit project // -// See asmjit.h or LICENSE.md for license and copyright information +// See or LICENSE.md for license and copyright information // SPDX-License-Identifier: Zlib #ifndef ASMJIT_ARM_A64ARCHTRAITS_P_H_INCLUDED @@ -18,20 +18,30 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) //! \addtogroup asmjit_a64 //! \{ -static const constexpr ArchTraits a64ArchTraits = { +static const constexpr ArchTraits a64_arch_traits = { // SP/FP/LR/PC. 
- Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFF, + Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFFu, // Reserved. - { 0, 0, 0 }, + { 0u, 0u, 0u }, // HW stack alignment (AArch64 requires stack aligned to 16 bytes at HW level). - 16, + 16u, - // Min/max stack offset - byte addressing is the worst, VecQ addressing the best. + // Min/max stack offset - byte addressing is the worst, vec.q addressing the best. 4095, 65520, - // Instruction hints [Gp, Vec, ExtraVirt2, ExtraVirt3]. + // Supported register types. + 0u | (1u << uint32_t(RegType::kGp32 )) + | (1u << uint32_t(RegType::kGp64 )) + | (1u << uint32_t(RegType::kVec8 )) + | (1u << uint32_t(RegType::kVec16 )) + | (1u << uint32_t(RegType::kVec32 )) + | (1u << uint32_t(RegType::kVec64 )) + | (1u << uint32_t(RegType::kVec128)) + | (1u << uint32_t(RegType::kMask )), + + // Instruction hints [Gp, Vec, Mask, Extra]. {{ InstHints::kPushPop, InstHints::kPushPop, @@ -39,29 +49,19 @@ static const constexpr ArchTraits a64ArchTraits = { InstHints::kNoHints }}, - // RegInfo. - #define V(index) OperandSignature{RegTraits::kSignature} - {{ ASMJIT_LOOKUP_TABLE_32(V, 0) }}, - #undef V - - // RegTypeToTypeId. - #define V(index) TypeId(RegTraits::kTypeId) - {{ ASMJIT_LOOKUP_TABLE_32(V, 0) }}, - #undef V - // TypeIdToRegType. - #define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kARM_GpW : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kARM_GpX : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? 
RegType::kARM_GpX : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kARM_GpX : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kARM_GpX : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kARM_VecS : \ - index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kARM_VecD : RegType::kNone) + #define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kGp32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kGp64 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kGp64 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kGp64 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kGp64 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kVec32 : \ + index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? 
RegType::kVec64 : RegType::kNone) {{ ASMJIT_LOOKUP_TABLE_32(V, 0) }}, #undef V diff --git a/3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp b/3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp index 32514b9bad9fb..a35c1ad3e3b1a 100644 --- a/3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp +++ b/3rdparty/asmjit/src/asmjit/arm/a64assembler.cpp @@ -1,6 +1,6 @@ // This file is part of AsmJit project // -// See asmjit.h or LICENSE.md for license and copyright information +// See or LICENSE.md for license and copyright information // SPDX-License-Identifier: Zlib #include "../core/api-build_p.h" @@ -13,6 +13,7 @@ #include "../core/logger.h" #include "../core/misc_p.h" #include "../core/support.h" + #include "../arm/armformatter_p.h" #include "../arm/armutils.h" #include "../arm/a64assembler.h" @@ -24,13 +25,13 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64) // a64::Assembler - Utils // ====================== -static ASMJIT_FORCE_INLINE constexpr uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); } -static ASMJIT_FORCE_INLINE constexpr uint32_t diff(VecElementType elementType, VecElementType baseType) noexcept { return uint32_t(elementType) - uint32_t(baseType); } +static ASMJIT_INLINE_CONSTEXPR uint32_t diff(RegType a, RegType b) noexcept { return uint32_t(a) - uint32_t(b); } +static ASMJIT_INLINE_CONSTEXPR uint32_t diff(VecElementType element_type, VecElementType base_type) noexcept { return uint32_t(element_type) - uint32_t(base_type); } // a64::Assembler - Cond // ===================== -static inline uint32_t condCodeToOpcodeCond(uint32_t cond) noexcept { return (uint32_t(cond) - 2u) & 0xFu; } +static inline uint32_t cond_code_to_opcode_field(uint32_t cond) noexcept { return (uint32_t(cond) - 2u) & 0xFu; } // a64::Assembler - Bits // ===================== @@ -50,21 +51,21 @@ static constexpr uint32_t kWX = InstDB::kWX; index == uint32_t(ShiftOp::kLSL) ? 3u : \ index == uint32_t(ShiftOp::kSXTW) ? 6u : \ index == uint32_t(ShiftOp::kSXTX) ? 
7u : 0xFF -static const uint8_t armShiftOpToLdStOptMap[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) }; +static const uint8_t shift_op_to_ld_st_opt_map[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) }; #undef VALUE // a64::Assembler - ExtendOpToRegType // ================================== -static inline RegType extendOptionToRegType(uint32_t option) noexcept { - uint32_t pred = (uint32_t(RegType::kARM_GpW) << (0x0 * 4)) | // 0b000 - UXTB. - (uint32_t(RegType::kARM_GpW) << (0x1 * 4)) | // 0b001 - UXTH. - (uint32_t(RegType::kARM_GpW) << (0x2 * 4)) | // 0b010 - UXTW. - (uint32_t(RegType::kARM_GpX) << (0x3 * 4)) | // 0b011 - UXTX|LSL. - (uint32_t(RegType::kARM_GpW) << (0x4 * 4)) | // 0b100 - SXTB. - (uint32_t(RegType::kARM_GpW) << (0x5 * 4)) | // 0b101 - SXTH. - (uint32_t(RegType::kARM_GpW) << (0x6 * 4)) | // 0b110 - SXTW. - (uint32_t(RegType::kARM_GpX) << (0x7 * 4)) ; // 0b111 - SXTX. +static inline RegType extend_option_to_reg_type(uint32_t option) noexcept { + uint32_t pred = (uint32_t(RegType::kGp32) << (0x0 * 4)) | // 0b000 - UXTB. + (uint32_t(RegType::kGp32) << (0x1 * 4)) | // 0b001 - UXTH. + (uint32_t(RegType::kGp32) << (0x2 * 4)) | // 0b010 - UXTW. + (uint32_t(RegType::kGp64) << (0x3 * 4)) | // 0b011 - UXTX|LSL. + (uint32_t(RegType::kGp32) << (0x4 * 4)) | // 0b100 - SXTB. + (uint32_t(RegType::kGp32) << (0x5 * 4)) | // 0b101 - SXTH. + (uint32_t(RegType::kGp32) << (0x6 * 4)) | // 0b110 - SXTW. + (uint32_t(RegType::kGp64) << (0x7 * 4)) ; // 0b111 - SXTX. return RegType((pred >> (option * 4u)) & 0xFu); } @@ -74,51 +75,64 @@ static inline RegType extendOptionToRegType(uint32_t option) noexcept { //! Struct that contains Size (2 bits), Q flag, and S (scalar) flag. These values //! are used to encode Q, Size, and Scalar fields in an opcode. 
struct SizeOp { - enum : uint8_t { - k128BitShift = 0, - kScalarShift = 1, - kSizeShift = 2, - - kQ = uint8_t(1u << k128BitShift), - kS = uint8_t(1u << kScalarShift), - - k00 = uint8_t(0 << kSizeShift), - k01 = uint8_t(1 << kSizeShift), - k10 = uint8_t(2 << kSizeShift), - k11 = uint8_t(3 << kSizeShift), - - k00Q = k00 | kQ, - k01Q = k01 | kQ, - k10Q = k10 | kQ, - k11Q = k11 | kQ, - - k00S = k00 | kS, - k01S = k01 | kS, - k10S = k10 | kS, - k11S = k11 | kS, - - kInvalid = 0xFFu, - - // Masks used by SizeOpMap. - kSzQ = (0x3u << kSizeShift) | kQ, - kSzS = (0x3u << kSizeShift) | kS, - kSzQS = (0x3u << kSizeShift) | kQ | kS - }; + //! \name Constants + //! \{ + + static inline constexpr uint8_t k128BitShift = 0; + static inline constexpr uint8_t kScalarShift = 1; + static inline constexpr uint8_t kSizeShift = 2; + + static inline constexpr uint8_t kQ = uint8_t(1u << k128BitShift); + static inline constexpr uint8_t kS = uint8_t(1u << kScalarShift); + + static inline constexpr uint8_t k00 = uint8_t(0 << kSizeShift); + static inline constexpr uint8_t k01 = uint8_t(1 << kSizeShift); + static inline constexpr uint8_t k10 = uint8_t(2 << kSizeShift); + static inline constexpr uint8_t k11 = uint8_t(3 << kSizeShift); + + static inline constexpr uint8_t k00Q = k00 | kQ; + static inline constexpr uint8_t k01Q = k01 | kQ; + static inline constexpr uint8_t k10Q = k10 | kQ; + static inline constexpr uint8_t k11Q = k11 | kQ; + + static inline constexpr uint8_t k00S = k00 | kS; + static inline constexpr uint8_t k01S = k01 | kS; + static inline constexpr uint8_t k10S = k10 | kS; + static inline constexpr uint8_t k11S = k11 | kS; + + static inline constexpr uint8_t kInvalid = 0xFFu; + + // Masks used by SizeOpMap. + static inline constexpr uint8_t kSzQ = (0x3u << kSizeShift) | kQ; + static inline constexpr uint8_t kSzS = (0x3u << kSizeShift) | kS; + static inline constexpr uint8_t kSzQS = (0x3u << kSizeShift) | kQ | kS; + + //! \} + + //! \name Members + //! 
\{ uint8_t value; - inline bool isValid() const noexcept { return value != kInvalid; } - inline void makeInvalid() noexcept { value = kInvalid; } + //! \} + + //! \name Accessors + //! \{ + + inline bool is_valid() const noexcept { return value != kInvalid; } + inline void make_invalid() noexcept { value = kInvalid; } inline uint32_t q() const noexcept { return (value >> k128BitShift) & 0x1u; } inline uint32_t qs() const noexcept { return ((value >> k128BitShift) | (value >> kScalarShift)) & 0x1u; } inline uint32_t scalar() const noexcept { return (value >> kScalarShift) & 0x1u; } inline uint32_t size() const noexcept { return (value >> kSizeShift) & 0x3u; } - inline void decrementSize() noexcept { + inline void decrement_size() noexcept { ASMJIT_ASSERT(size() > 0); value = uint8_t(value - (1u << kSizeShift)); } + + //! \} }; struct SizeOpTable { @@ -129,32 +143,32 @@ struct SizeOpTable { }; // 40 elements for each combination. - SizeOp array[(uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB) + 1) * 8]; + SizeOp array[(uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8) + 1) * 8]; }; #define VALUE_BIN(x) { \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k00 : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k00Q : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00 : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00Q : SizeOp::kInvalid \ + x == (((uint32_t(RegType::kVec64 ) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k00 : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? 
SizeOp::k00Q : \ + x == (((uint32_t(RegType::kVec64 ) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00 : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00Q : SizeOp::kInvalid \ } #define VALUE_ANY(x) { \ - x == (((uint32_t(RegType::kARM_VecB) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k00S : \ - x == (((uint32_t(RegType::kARM_VecH) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k01S : \ - x == (((uint32_t(RegType::kARM_VecS) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k10S : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k11S : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00 : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00Q : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kH )) ? SizeOp::k01 : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kH )) ? SizeOp::k01Q : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kS )) ? SizeOp::k10 : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kS )) ? SizeOp::k10Q : \ - x == (((uint32_t(RegType::kARM_VecD) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kD )) ? SizeOp::k11S : \ - x == (((uint32_t(RegType::kARM_VecV) - uint32_t(RegType::kARM_VecB)) << 3) | uint32_t(VecElementType::kD )) ? SizeOp::k11Q : SizeOp::kInvalid \ + x == (((uint32_t(RegType::kVec8) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? 
SizeOp::k00S : \ + x == (((uint32_t(RegType::kVec16) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k01S : \ + x == (((uint32_t(RegType::kVec32) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k10S : \ + x == (((uint32_t(RegType::kVec64) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kNone)) ? SizeOp::k11S : \ + x == (((uint32_t(RegType::kVec64) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00 : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kB )) ? SizeOp::k00Q : \ + x == (((uint32_t(RegType::kVec64) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kH )) ? SizeOp::k01 : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kH )) ? SizeOp::k01Q : \ + x == (((uint32_t(RegType::kVec64) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kS )) ? SizeOp::k10 : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kS )) ? SizeOp::k10Q : \ + x == (((uint32_t(RegType::kVec64) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kD )) ? SizeOp::k11S : \ + x == (((uint32_t(RegType::kVec128) - uint32_t(RegType::kVec8)) << 3) | uint32_t(VecElementType::kD )) ? 
SizeOp::k11Q : SizeOp::kInvalid \ } -static const SizeOpTable sizeOpTable[SizeOpTable::kCount] = { +static const SizeOpTable size_op_table[SizeOpTable::kCount] = { {{ ASMJIT_LOOKUP_TABLE_40(VALUE_BIN, 0) }}, {{ ASMJIT_LOOKUP_TABLE_40(VALUE_ANY, 0) }} }; @@ -163,12 +177,12 @@ static const SizeOpTable sizeOpTable[SizeOpTable::kCount] = { #undef VALUE_BIN struct SizeOpMap { - uint8_t tableId; - uint8_t sizeOpMask; - uint16_t acceptMask; + uint8_t table_id; + uint8_t size_op_mask; + uint16_t accept_mask; }; -static const constexpr SizeOpMap sizeOpMap[InstDB::kVO_Count] = { +static const constexpr SizeOpMap size_op_map[InstDB::kVO_Count] = { { // kVO_V_B: SizeOpTable::kTableBin, SizeOp::kQ , uint16_t(B(SizeOp::k00) | B(SizeOp::k00Q)) }, @@ -265,27 +279,28 @@ static const constexpr SizeOpMap sizeOpMap[InstDB::kVO_Count] = { } }; -static const Operand_& significantSimdOp(const Operand_& o0, const Operand_& o1, uint32_t instFlags) noexcept { - return !(instFlags & InstDB::kInstFlagLong) ? o0 : o1; +static const Operand_& significant_simd_op(const Operand_& o0, const Operand_& o1, uint32_t inst_flags) noexcept { + return !(inst_flags & InstDB::kInstFlagLong) ? o0 : o1; } -static inline SizeOp armElementTypeToSizeOp(uint32_t vecOpType, RegType regType, VecElementType elementType) noexcept { +static inline SizeOp element_type_to_size_op(uint32_t vec_op_type, RegType reg_type, VecElementType element_type) noexcept { // Instruction data or Assembler is wrong if this triggers an assertion failure. - ASMJIT_ASSERT(vecOpType < InstDB::kVO_Count); + ASMJIT_ASSERT(vec_op_type < InstDB::kVO_Count); // ElementType uses 3 bits in the operand signature, it should never overflow. 
- ASMJIT_ASSERT(uint32_t(elementType) <= 0x7u); + ASMJIT_ASSERT(uint32_t(element_type) <= 0x7u); - const SizeOpMap& map = sizeOpMap[vecOpType]; - const SizeOpTable& table = sizeOpTable[map.tableId]; + const SizeOpMap& map = size_op_map[vec_op_type]; + const SizeOpTable& table = size_op_table[map.table_id]; - size_t index = (Support::min(diff(regType, RegType::kARM_VecB), diff(RegType::kARM_VecV, RegType::kARM_VecB) + 1) << 3) | uint32_t(elementType); + size_t index = (Support::min(diff(reg_type, RegType::kVec8), diff(RegType::kVec128, RegType::kVec8) + 1) << 3) | uint32_t(element_type); SizeOp op = table.array[index]; - SizeOp modifiedOp { uint8_t(op.value & map.sizeOpMask) }; + SizeOp modified_op { uint8_t(op.value & map.size_op_mask) }; - if (!Support::bitTest(map.acceptMask, op.value)) - modifiedOp.makeInvalid(); + if (!Support::bit_test(map.accept_mask, op.value)) { + modified_op.make_invalid(); + } - return modifiedOp; + return modified_op; } // a64::Assembler - Immediate Encoding Utilities (Integral) @@ -302,17 +317,17 @@ struct HalfWordImm { struct LMHImm { uint32_t lm; uint32_t h; - uint32_t maxRmId; + uint32_t max_rm_id; }; -static inline uint32_t countZeroHalfWords64(uint64_t imm) noexcept { +static inline uint32_t count_zero_half_words_64(uint64_t imm) noexcept { return uint32_t((imm & 0x000000000000FFFFu) == 0) + uint32_t((imm & 0x00000000FFFF0000u) == 0) + uint32_t((imm & 0x0000FFFF00000000u) == 0) + uint32_t((imm & 0xFFFF000000000000u) == 0) ; } -static uint32_t encodeMovSequence32(uint32_t out[2], uint32_t imm, uint32_t rd, uint32_t x) noexcept { +static uint32_t encode_mov_sequence_32(uint32_t out[2], uint32_t imm, uint32_t rd, uint32_t x) noexcept { ASMJIT_ASSERT(rd <= 31); uint32_t kMovZ = 0b01010010100000000000000000000000 | (x << 31); @@ -344,7 +359,7 @@ static uint32_t encodeMovSequence32(uint32_t out[2], uint32_t imm, uint32_t rd, return 2; } -static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd, uint32_t x) noexcept 
{ +static uint32_t encode_mov_sequence_64(uint32_t out[4], uint64_t imm, uint32_t rd, uint32_t x) noexcept { ASMJIT_ASSERT(rd <= 31); uint32_t kMovZ = 0b11010010100000000000000000000000; @@ -352,25 +367,26 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd, uint32_t kMovK = 0b11110010100000000000000000000000; if (imm <= 0xFFFFFFFFu) - return encodeMovSequence32(out, uint32_t(imm), rd, x); + return encode_mov_sequence_32(out, uint32_t(imm), rd, x); - uint32_t zhw = countZeroHalfWords64( imm); - uint32_t ohw = countZeroHalfWords64(~imm); + uint32_t zhw = count_zero_half_words_64( imm); + uint32_t ohw = count_zero_half_words_64(~imm); if (zhw >= ohw) { uint32_t op = kMovZ; uint32_t count = 0; - for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) { - uint32_t hwImm = uint32_t(imm & 0xFFFFu); - if (hwImm == 0) + for (uint32_t hw_index = 0; hw_index < 4; hw_index++, imm >>= 16) { + uint32_t hw_imm = uint32_t(imm & 0xFFFFu); + if (hw_imm == 0) { continue; + } - out[count++] = op | (hwIndex << 21) | (hwImm << 5) | rd; + out[count++] = op | (hw_index << 21) | (hw_imm << 5) | rd; op = kMovK; } - // This should not happen - zero should be handled by encodeMovSequence32(). + // This should not happen - zero should be handled by encode_mov_sequence_32(). 
ASMJIT_ASSERT(count > 0); return count; @@ -378,39 +394,40 @@ static uint32_t encodeMovSequence64(uint32_t out[4], uint64_t imm, uint32_t rd, else { uint32_t op = kMovN; uint32_t count = 0; - uint32_t negMask = 0xFFFFu; + uint32_t neg_mask = 0xFFFFu; - for (uint32_t hwIndex = 0; hwIndex < 4; hwIndex++, imm >>= 16) { - uint32_t hwImm = uint32_t(imm & 0xFFFFu); - if (hwImm == 0xFFFFu) + for (uint32_t hw_index = 0; hw_index < 4; hw_index++, imm >>= 16) { + uint32_t hw_imm = uint32_t(imm & 0xFFFFu); + if (hw_imm == 0xFFFFu) { continue; + } - out[count++] = op | (hwIndex << 21) | ((hwImm ^ negMask) << 5) | rd; + out[count++] = op | (hw_index << 21) | ((hw_imm ^ neg_mask) << 5) | rd; op = kMovK; - negMask = 0; + neg_mask = 0; } if (count == 0) { - out[count++] = kMovN | ((0xFFFF ^ negMask) << 5) | rd; + out[count++] = kMovN | ((0xFFFF ^ neg_mask) << 5) | rd; } return count; } } -static inline bool encodeLMH(uint32_t sizeField, uint32_t elementIndex, LMHImm* out) noexcept { - if (sizeField != 1 && sizeField != 2) +static inline bool encode_lmh(uint32_t size_field, uint32_t element_index, Out<LMHImm> out) noexcept { + if (size_field != 1 && size_field != 2) return false; - uint32_t hShift = 3u - sizeField; - uint32_t lmShift = sizeField - 1u; - uint32_t maxElementIndex = 15u >> sizeField; + uint32_t h_shift = 3u - size_field; + uint32_t lm_shift = size_field - 1u; + uint32_t max_element_index = 15u >> size_field; - out->h = elementIndex >> hShift; - out->lm = (elementIndex << lmShift) & 0x3u; - out->maxRmId = (8u << sizeField) - 1; + out->h = element_index >> h_shift; + out->lm = (element_index << lm_shift) & 0x3u; + out->max_rm_id = (8u << size_field) - 1; - return elementIndex <= maxElementIndex; + return element_index <= max_element_index; } // a64::Assembler - Opcode @@ -433,27 +450,27 @@ struct Opcode { inline uint32_t get() const noexcept { return v; } inline void reset(uint32_t value) noexcept { v = value; } - inline bool hasQ() const noexcept { return (v & kQ) != 0; } -
inline bool hasX() const noexcept { return (v & kX) != 0; } + inline bool has_q() const noexcept { return (v & kQ) != 0; } + inline bool has_x() const noexcept { return (v & kX) != 0; } template<typename T> - inline Opcode& addImm(T value, uint32_t bitIndex) noexcept { return operator|=(uint32_t(value) << bitIndex); } + inline Opcode& add_imm(T value, uint32_t bit_index) noexcept { return operator|=(uint32_t(value) << bit_index); } template<typename T> - inline Opcode& xorImm(T value, uint32_t bitIndex) noexcept { return operator^=(uint32_t(value) << bitIndex); } + inline Opcode& xor_imm(T value, uint32_t bit_index) noexcept { return operator^=(uint32_t(value) << bit_index); } template<typename T, typename Condition> - inline Opcode& addIf(T value, const Condition& condition) noexcept { return operator|=(condition ? uint32_t(value) : uint32_t(0)); } + inline Opcode& add_if(T value, const Condition& condition) noexcept { return operator|=(condition ? uint32_t(value) : uint32_t(0)); } - inline Opcode& addLogicalImm(const LogicalImm& logicalImm) noexcept { - addImm(logicalImm.n, 22); - addImm(logicalImm.r, 16); - addImm(logicalImm.s, 10); + inline Opcode& add_logical_imm(const LogicalImm& logical_imm) noexcept { + add_imm(logical_imm.n, 22); + add_imm(logical_imm.r, 16); + add_imm(logical_imm.s, 10); return *this; } - inline Opcode& addReg(uint32_t id, uint32_t bitIndex) noexcept { return operator|=((id & 31u) << bitIndex); } - inline Opcode& addReg(const Operand_& op, uint32_t bitIndex) noexcept { return addReg(op.id(), bitIndex); } + inline Opcode& add_reg(uint32_t id, uint32_t bit_index) noexcept { return operator|=((id & 31u) << bit_index); } + inline Opcode& add_reg(const Operand_& op, uint32_t bit_index) noexcept { return add_reg(op.id(), bit_index); } inline Opcode& operator=(uint32_t x) noexcept { v = x; return *this; } inline Opcode& operator&=(uint32_t x) noexcept { v &= x; return *this; } @@ -468,69 +485,68 @@ struct Opcode { // a64::Assembler - Signature Utilities // ==================================== -//
TODO: [ARM] Deprecate matchSignature. -static inline bool matchSignature(const Operand_& o0, const Operand_& o1, uint32_t instFlags) noexcept { - if (!(instFlags & (InstDB::kInstFlagLong | InstDB::kInstFlagNarrow))) +// TODO: [ARM] Deprecate match_signature. +static inline bool match_signature(const Operand_& o0, const Operand_& o1, uint32_t inst_flags) noexcept { + if (!(inst_flags & (InstDB::kInstFlagLong | InstDB::kInstFlagNarrow))) return o0.signature() == o1.signature(); // TODO: [ARM] Something smart to validate this. return true; } -static inline bool matchSignature(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t instFlags) noexcept { - return matchSignature(o0, o1, instFlags) && o1.signature() == o2.signature(); +static inline bool match_signature(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t inst_flags) noexcept { + return match_signature(o0, o1, inst_flags) && o1.signature() == o2.signature(); } -static inline bool matchSignature(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint32_t instFlags) noexcept { - return matchSignature(o0, o1, instFlags) && o1.signature() == o2.signature() && o2.signature() == o3.signature(); +static inline bool match_signature(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, uint32_t inst_flags) noexcept { + return match_signature(o0, o1, inst_flags) && o1.signature() == o2.signature() && o2.signature() == o3.signature(); } // Memory must be either: // 1. Absolute address, which will be converted to relative. // 2. Relative displacement (Label). // 3. Base register + either offset or index. -static inline bool armCheckMemBaseIndexRel(const Mem& mem) noexcept { - // Allowed base types (Nothing, Label, and GpX). - constexpr uint32_t kBaseMask = B(0) | - B(RegType::kLabelTag) | - B(RegType::kARM_GpX); - - // Allowed index types (Nothing, GpW, and GpX). 
- constexpr uint32_t kIndexMask = B(0) | - B(RegType::kARM_GpW) | - B(RegType::kARM_GpX) ; +static inline bool check_mem_base_index_rel(const Mem& mem) noexcept { + // Allowed base types (Nothing, Label, and Gp64). + constexpr uint32_t kBaseMask = B(0) | B(RegType::kLabelTag) | B(RegType::kGp64); + // Allowed index types (Nothing, Gp32, and Gp64). + constexpr uint32_t kIndexMask = B(0) | B(RegType::kGp32) | B(RegType::kGp64) ; - RegType baseType = mem.baseType(); - RegType indexType = mem.indexType(); + RegType base_type = mem.base_type(); + RegType index_type = mem.index_type(); - if (!Support::bitTest(kBaseMask, baseType)) + if (!Support::bit_test(kBaseMask, base_type)) { return false; + } - if (baseType > RegType::kLabelTag) { - // Index allows either GpW or GpX. - if (!Support::bitTest(kIndexMask, indexType)) + if (base_type > RegType::kLabelTag) { + // Index allows either Gp32 or Gp64. + if (!Support::bit_test(kIndexMask, index_type)) { return false; + } - if (indexType == RegType::kNone) + if (index_type == RegType::kNone) { return true; - else - return !mem.hasOffset(); + } + else { + return !mem.has_offset(); + } } else { // No index register allowed if this is a PC relative address (literal). 
- return indexType == RegType::kNone; + return index_type == RegType::kNone; } } struct EncodeFpOpcodeBits { - uint32_t sizeMask; + uint32_t size_mask; uint32_t mask[3]; }; -static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint32_t vOp, uint32_t vHf, Opcode* opcode, uint32_t* szOut) noexcept { +static inline bool pick_fp_opcode(const Vec& reg, uint32_t s_op, uint32_t s_hf, uint32_t v_op, uint32_t v_hf, Opcode* opcode, uint32_t* sz_out) noexcept { static constexpr uint32_t kQBitIndex = 30; - static const EncodeFpOpcodeBits szBits[InstDB::kHF_Count] = { + static const EncodeFpOpcodeBits sz_bits_table[InstDB::kHF_Count] = { { B(2) | B(1) , { 0u , 0u, B(22) } }, { B(2) | B(1) | B(0), { 0u , 0u, 0u } }, { B(2) | B(1) | B(0), { B(23) | B(22) , 0u, B(22) } }, @@ -539,49 +555,51 @@ static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint { B(2) | B(1) | B(0), { B(23) , 0u, B(22) } } }; - if (!reg.hasElementType()) { + if (!reg.has_element_type()) { // Scalar operation [HSD]. - uint32_t sz = diff(reg.type(), RegType::kARM_VecH); - if (sz > 2u || !Support::bitTest(szBits[sHf].sizeMask, sz)) + uint32_t sz = diff(reg.reg_type(), RegType::kVec16); + if (sz > 2u || !Support::bit_test(sz_bits_table[s_hf].size_mask, sz)) { return false; + } - opcode->reset(szBits[sHf].mask[sz] ^ sOp); - *szOut = sz; - return sOp != 0; + opcode->reset(sz_bits_table[s_hf].mask[sz] ^ s_op); + *sz_out = sz; + return s_op != 0; } else { // Vector operation [HSD]. 
- uint32_t q = diff(reg.type(), RegType::kARM_VecD); - uint32_t sz = diff(reg.elementType(), VecElementType::kH); + uint32_t q = diff(reg.reg_type(), RegType::kVec64); + uint32_t sz = diff(reg.element_type(), VecElementType::kH); - if (q > 1u || sz > 2u || !Support::bitTest(szBits[vHf].sizeMask, sz)) + if (q > 1u || sz > 2u || !Support::bit_test(sz_bits_table[v_hf].size_mask, sz)) { return false; + } - opcode->reset(szBits[vHf].mask[sz] ^ (vOp | (q << kQBitIndex))); - *szOut = sz; - return vOp != 0; + opcode->reset(sz_bits_table[v_hf].mask[sz] ^ (v_op | (q << kQBitIndex))); + *sz_out = sz; + return v_op != 0; } } -static inline bool pickFpOpcode(const Vec& reg, uint32_t sOp, uint32_t sHf, uint32_t vOp, uint32_t vHf, Opcode* opcode) noexcept { +static inline bool pick_fp_opcode(const Vec& reg, uint32_t s_op, uint32_t s_hf, uint32_t v_op, uint32_t v_hf, Opcode* opcode) noexcept { uint32_t sz; - return pickFpOpcode(reg, sOp, sHf, vOp, vHf, opcode, &sz); + return pick_fp_opcode(reg, s_op, s_hf, v_op, v_hf, opcode, &sz); } // a64::Assembler - Operand Checks // =============================== // Checks whether all operands have the same signature. 
-static inline bool checkSignature(const Operand_& o0, const Operand_& o1) noexcept { +static inline bool check_signature(const Operand_& o0, const Operand_& o1) noexcept { return o0.signature() == o1.signature(); } -static inline bool checkSignature(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { +static inline bool check_signature(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { return o0.signature() == o1.signature() && o1.signature() == o2.signature(); } -static inline bool checkSignature(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { +static inline bool check_signature(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { return o0.signature() == o1.signature() && o1.signature() == o2.signature() && o2.signature() == o3.signature(); @@ -589,58 +607,57 @@ static inline bool checkSignature(const Operand_& o0, const Operand_& o1, const // Checks whether the register is GP register of the allowed types. // -// Allowed is a 2-bit mask, where the first bits allows GpW and the second bit -// allows GpX. These bits are usually stored within the instruction, but could -// be also hardcoded in the assembler for instructions where GP types are not -// selectable. -static inline bool checkGpType(const Operand_& op, uint32_t allowed) noexcept { - RegType type = op.as().type(); - return Support::bitTest(allowed << uint32_t(RegType::kARM_GpW), type); +// Allowed is a 2-bit mask, where the first bits allows Gp32 and the second bit allows Gp64. These bits are usually +// stored within the instruction, but could be also hardcoded in the assembler for instructions where GP types are +// not selectable. 
+static inline bool check_gp_type(const Operand_& op, uint32_t allowed) noexcept { + RegType type = op.as().reg_type(); + return Support::bit_test(allowed << uint32_t(RegType::kGp32), type); } -static inline bool checkGpType(const Operand_& op, uint32_t allowed, uint32_t* x) noexcept { - // NOTE: We set 'x' to one only when GpW is allowed, otherwise the X is part +static inline bool check_gp_type(const Operand_& op, uint32_t allowed, uint32_t* x) noexcept { + // NOTE: We set 'x' to one only when Gp32 is allowed, otherwise the X is part // of the opcode and we cannot set it. This is why this works without requiring // additional logic. - RegType type = op.as().type(); - *x = diff(type, RegType::kARM_GpW) & allowed; - return Support::bitTest(allowed << uint32_t(RegType::kARM_GpW), type); + RegType type = op.as().reg_type(); + *x = diff(type, RegType::kGp32) & allowed; + return Support::bit_test(allowed << uint32_t(RegType::kGp32), type); } -static inline bool checkGpType(const Operand_& o0, const Operand_& o1, uint32_t allowed, uint32_t* x) noexcept { - return checkGpType(o0, allowed, x) && checkSignature(o0, o1); +static inline bool check_gp_type(const Operand_& o0, const Operand_& o1, uint32_t allowed, uint32_t* x) noexcept { + return check_gp_type(o0, allowed, x) && check_signature(o0, o1); } -static inline bool checkGpType(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t allowed, uint32_t* x) noexcept { - return checkGpType(o0, allowed, x) && checkSignature(o0, o1, o2); +static inline bool check_gp_type(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t allowed, uint32_t* x) noexcept { + return check_gp_type(o0, allowed, x) && check_signature(o0, o1, o2); } -static inline bool checkGpId(const Operand_& op, uint32_t hiId = kZR) noexcept { +static inline bool check_gp_id(const Operand_& op, uint32_t hi_id = kZR) noexcept { uint32_t id = op.as().id(); - return id < 31u || id == hiId; + return id < 31u || id == hi_id; } -static 
inline bool checkGpId(const Operand_& o0, const Operand_& o1, uint32_t hiId = kZR) noexcept { +static inline bool check_gp_id(const Operand_& o0, const Operand_& o1, uint32_t hi_id = kZR) noexcept { uint32_t id0 = o0.as().id(); uint32_t id1 = o1.as().id(); - return (id0 < 31u || id0 == hiId) && (id1 < 31u || id1 == hiId); + return (id0 < 31u || id0 == hi_id) && (id1 < 31u || id1 == hi_id); } -static inline bool checkGpId(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t hiId = kZR) noexcept { +static inline bool check_gp_id(const Operand_& o0, const Operand_& o1, const Operand_& o2, uint32_t hi_id = kZR) noexcept { uint32_t id0 = o0.as().id(); uint32_t id1 = o1.as().id(); uint32_t id2 = o2.as().id(); - return (id0 < 31u || id0 == hiId) && (id1 < 31u || id1 == hiId) && (id2 < 31u || id2 == hiId); + return (id0 < 31u || id0 == hi_id) && (id1 < 31u || id1 == hi_id) && (id2 < 31u || id2 == hi_id); } -static inline bool checkVecId(const Operand_& op) noexcept { +static inline bool check_vec_id(const Operand_& op) noexcept { uint32_t id = op.as().id(); return id <= 31u; } -static inline bool checkVecId(const Operand_& o0, const Operand_& o1) noexcept { +static inline bool check_vec_id(const Operand_& o0, const Operand_& o1) noexcept { uint32_t id0 = o0.as().id(); uint32_t id1 = o1.as().id(); @@ -648,7 +665,7 @@ static inline bool checkVecId(const Operand_& o0, const Operand_& o1) noexcept { } /* Unused at the moment. 
-static inline bool checkVecId(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { +static inline bool check_vec_id(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { uint32_t id0 = o0.as().id(); uint32_t id1 = o1.as().id(); uint32_t id2 = o2.as().id(); @@ -656,7 +673,7 @@ static inline bool checkVecId(const Operand_& o0, const Operand_& o1, const Oper return (id0 | id1 | id2) <= 31u; } -static inline bool checkVecId(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { +static inline bool check_vec_id(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { uint32_t id0 = o0.as().id(); uint32_t id1 = o1.as().id(); uint32_t id2 = o2.as().id(); @@ -666,24 +683,24 @@ static inline bool checkVecId(const Operand_& o0, const Operand_& o1, const Oper } */ -static inline bool checkMemBase(const Mem& mem) noexcept { - return mem.baseType() == RegType::kARM_GpX && mem.baseId() <= 31; +static inline bool check_mem_base(const Mem& mem) noexcept { + return mem.base_type() == RegType::kGp64 && mem.base_id() <= 31; } -static inline bool checkEven(const Operand_& o0, const Operand_& o1) noexcept { +static inline bool check_even(const Operand_& o0, const Operand_& o1) noexcept { return ((o0.id() | o1.id()) & 1) == 0; } -static inline bool checkConsecutive(const Operand_& o0, const Operand_& o1) noexcept { +static inline bool check_consecutive(const Operand_& o0, const Operand_& o1) noexcept { return ((o0.id() + 1u) & 0x1Fu) == o1.id(); } -static inline bool checkConsecutive(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { +static inline bool check_consecutive(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { return ((o0.id() + 1u) & 0x1Fu) == o1.id() && ((o0.id() + 2u) & 0x1Fu) == o2.id(); } -static inline bool checkConsecutive(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { +static 
inline bool check_consecutive(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { return ((o0.id() + 1u) & 0x1Fu) == o1.id() && ((o0.id() + 2u) & 0x1Fu) == o2.id() && ((o0.id() + 3u) & 0x1Fu) == o3.id(); @@ -692,47 +709,50 @@ static inline bool checkConsecutive(const Operand_& o0, const Operand_& o1, cons // a64::Assembler - CheckReg // ========================= -#define V(index) (index == uint32_t(RegType::kARM_GpW) ? Gp::kIdZr : \ - index == uint32_t(RegType::kARM_GpX) ? Gp::kIdZr : \ - index == uint32_t(RegType::kARM_VecB) ? 31u : \ - index == uint32_t(RegType::kARM_VecH) ? 31u : \ - index == uint32_t(RegType::kARM_VecS) ? 31u : \ - index == uint32_t(RegType::kARM_VecD) ? 31u : \ - index == uint32_t(RegType::kARM_VecV) ? 31u : 0) -static const Support::Array commonHiRegIdOfType = {{ +#define V(index) (index == uint32_t(RegType::kGp32) ? Gp::kIdZr : \ + index == uint32_t(RegType::kGp64) ? Gp::kIdZr : \ + index == uint32_t(RegType::kVec8) ? 31u : \ + index == uint32_t(RegType::kVec16) ? 31u : \ + index == uint32_t(RegType::kVec32) ? 31u : \ + index == uint32_t(RegType::kVec64) ? 31u : \ + index == uint32_t(RegType::kVec128) ? 
31u : 0) +static const Support::Array common_hi_reg_id_of_type_table = {{ ASMJIT_LOOKUP_TABLE_32(V, 0) }}; #undef V -static inline bool checkValidRegs(const Operand_& o0) noexcept { - return bool(unsigned(o0.id() < 31) | unsigned(o0.id() == commonHiRegIdOfType[o0.as().type()])); +static inline bool check_valid_regs(const Operand_& o0) noexcept { + return bool(unsigned(o0.id() < 31) | unsigned(o0.id() == common_hi_reg_id_of_type_table[o0.as().reg_type()])); } -static inline bool checkValidRegs(const Operand_& o0, const Operand_& o1) noexcept { - return bool((unsigned(o0.id() < 31) | unsigned(o0.id() == commonHiRegIdOfType[o0.as().type()])) & - (unsigned(o1.id() < 31) | unsigned(o1.id() == commonHiRegIdOfType[o1.as().type()]))); +static inline bool check_valid_regs(const Operand_& o0, const Operand_& o1) noexcept { + return bool((unsigned(o0.id() < 31) | unsigned(o0.id() == common_hi_reg_id_of_type_table[o0.as().reg_type()])) & + (unsigned(o1.id() < 31) | unsigned(o1.id() == common_hi_reg_id_of_type_table[o1.as().reg_type()]))); } -static inline bool checkValidRegs(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { - return bool((unsigned(o0.id() < 31) | unsigned(o0.id() == commonHiRegIdOfType[o0.as().type()])) & - (unsigned(o1.id() < 31) | unsigned(o1.id() == commonHiRegIdOfType[o1.as().type()])) & - (unsigned(o2.id() < 31) | unsigned(o2.id() == commonHiRegIdOfType[o2.as().type()]))); +static inline bool check_valid_regs(const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept { + return bool((unsigned(o0.id() < 31) | unsigned(o0.id() == common_hi_reg_id_of_type_table[o0.as().reg_type()])) & + (unsigned(o1.id() < 31) | unsigned(o1.id() == common_hi_reg_id_of_type_table[o1.as().reg_type()])) & + (unsigned(o2.id() < 31) | unsigned(o2.id() == common_hi_reg_id_of_type_table[o2.as().reg_type()]))); } -static inline bool checkValidRegs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { - return 
bool((unsigned(o0.id() < 31) | unsigned(o0.id() == commonHiRegIdOfType[o0.as().type()])) & - (unsigned(o1.id() < 31) | unsigned(o1.id() == commonHiRegIdOfType[o1.as().type()])) & - (unsigned(o2.id() < 31) | unsigned(o2.id() == commonHiRegIdOfType[o2.as().type()])) & - (unsigned(o3.id() < 31) | unsigned(o3.id() == commonHiRegIdOfType[o3.as().type()]))); +static inline bool check_valid_regs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept { + return bool((unsigned(o0.id() < 31) | unsigned(o0.id() == common_hi_reg_id_of_type_table[o0.as().reg_type()])) & + (unsigned(o1.id() < 31) | unsigned(o1.id() == common_hi_reg_id_of_type_table[o1.as().reg_type()])) & + (unsigned(o2.id() < 31) | unsigned(o2.id() == common_hi_reg_id_of_type_table[o2.as().reg_type()])) & + (unsigned(o3.id() < 31) | unsigned(o3.id() == common_hi_reg_id_of_type_table[o3.as().reg_type()]))); } // a64::Assembler - Construction & Destruction // =========================================== Assembler::Assembler(CodeHolder* code) noexcept : BaseAssembler() { - _archMask = uint64_t(1) << uint32_t(Arch::kAArch64); - if (code) + _arch_mask = uint64_t(1) << uint32_t(Arch::kAArch64); + init_emitter_funcs(this); + + if (code) { code->attach(this); + } } Assembler::~Assembler() noexcept {} @@ -758,7 +778,7 @@ Assembler::~Assembler() noexcept {} (uint32_t(OperandType::k##OP2) << 6) + \ (uint32_t(OperandType::k##OP3) << 9)) -Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) { +Error Assembler::_emit(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) { // Logging/Validation/Error. constexpr InstOptions kRequiresSpecialHandling = InstOptions::kReserved; @@ -768,78 +788,95 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // Combine all instruction options and also check whether the instruction // is valid. 
All options that require special handling (including invalid // instruction) are handled by the next branch. - InstOptions options = InstOptions(instId - 1 >= Inst::_kIdCount - 1) | InstOptions((size_t)(_bufferEnd - writer.cursor()) < 4) | instOptions() | forcedInstOptions(); + InstOptions options = InstOptions(inst_id - 1 >= Inst::_kIdCount - 1) | InstOptions((size_t)(_buffer_end - writer.cursor()) < 4) | inst_options() | forced_inst_options(); - CondCode instCC = BaseInst::extractARMCondCode(instId); - instId = instId & uint32_t(InstIdParts::kRealId); + CondCode inst_cc = BaseInst::extract_arm_cond_code(inst_id); + inst_id = inst_id & uint32_t(InstIdParts::kRealId); - if (instId >= Inst::_kIdCount) - instId = 0; + if (inst_id >= Inst::_kIdCount) { + inst_id = 0; + } - const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId]; - uint32_t encodingIndex = instInfo->_encodingDataIndex; + const InstDB::InstInfo* inst_info = &InstDB::_inst_info_table[inst_id]; + uint32_t encoding_index = inst_info->_encoding_data_index; Opcode opcode; uint32_t isign4; - uint32_t instFlags; + uint32_t inst_flags; - const Operand_& o3 = opExt[EmitterUtils::kOp3]; - const Operand_* rmRel = nullptr; + const Operand_& o3 = op_ext[EmitterUtils::kOp3]; + const Operand_* rm_rel = nullptr; - uint32_t multipleOpData[4]; - uint32_t multipleOpCount; + uint32_t multiple_op_data[4]; + uint32_t multiple_op_count; // These are only used when instruction uses a relative displacement. - OffsetFormat offsetFormat; // Offset format. - uint64_t offsetValue; // Offset value (if known). + OffsetFormat offset_format; // Offset format. + uint64_t offset_value; // Offset value (if known). if (ASMJIT_UNLIKELY(Support::test(options, kRequiresSpecialHandling))) { - if (ASMJIT_UNLIKELY(!_code)) - return reportError(DebugUtils::errored(kErrorNotInitialized)); + if (ASMJIT_UNLIKELY(!_code)) { + return report_error(make_error(Error::kNotInitialized)); + } // Unknown instruction. 
- if (ASMJIT_UNLIKELY(instId == 0)) + if (ASMJIT_UNLIKELY(inst_id == 0)) { goto InvalidInstruction; + } // Condition code can only be used with 'B' instruction. - if (ASMJIT_UNLIKELY(instCC != CondCode::kAL && instId != Inst::kIdB)) + if (ASMJIT_UNLIKELY(inst_cc != CondCode::kAL && inst_id != Inst::kIdB)) { goto InvalidInstruction; + } // Grow request, happens rarely. - err = writer.ensureSpace(this, 4); - if (ASMJIT_UNLIKELY(err)) + err = writer.ensure_space(this, 4); + if (ASMJIT_UNLIKELY(err != Error::kOk)) { goto Failed; + } #ifndef ASMJIT_NO_VALIDATION // Strict validation. - if (hasDiagnosticOption(DiagnosticOptions::kValidateAssembler)) { - Operand_ opArray[Globals::kMaxOpCount]; - EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt); + if (has_diagnostic_option(DiagnosticOptions::kValidateAssembler)) { + Operand_ op_array[Globals::kMaxOpCount]; + EmitterUtils::op_array_from_emit_args(op_array, o0, o1, o2, op_ext); - err = _funcs.validate(BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount, ValidationFlags::kNone); - if (ASMJIT_UNLIKELY(err)) + err = _funcs.validate(BaseInst(inst_id, options, _extra_reg), op_array, Globals::kMaxOpCount, ValidationFlags::kNone); + if (ASMJIT_UNLIKELY(err != Error::kOk)) { goto Failed; + } } #endif } // Signature of the first 4 operands. 
- isign4 = (uint32_t(o0.opType()) ) + - (uint32_t(o1.opType()) << 3) + - (uint32_t(o2.opType()) << 6) + - (uint32_t(o3.opType()) << 9); - instFlags = instInfo->flags(); + isign4 = (uint32_t(o0.op_type()) ) + + (uint32_t(o1.op_type()) << 3) + + (uint32_t(o2.op_type()) << 6) + + (uint32_t(o3.op_type()) << 9); + inst_flags = inst_info->flags(); - switch (instInfo->_encoding) { + switch (inst_info->_encoding) { // ------------------------------------------------------------------------ // [Base - Universal] // ------------------------------------------------------------------------ case InstDB::kEncodingBaseOp: { - const InstDB::EncodingData::BaseOp& opData = InstDB::EncodingData::baseOp[encodingIndex]; + const InstDB::EncodingData::BaseOp& op_data = InstDB::EncodingData::baseOp[encoding_index]; if (isign4 == 0) { - opcode.reset(opData.opcode); + opcode.reset(op_data.opcode); + goto EmitOp; + } + + break; + } + + case InstDB::kEncodingBaseOpX16: { + const InstDB::EncodingData::BaseOpX16& op_data = InstDB::EncodingData::baseOpX16[encoding_index]; + + if (isign4 == ENC_OPS1(Reg) && o0.as().is_gp64(16)) { + opcode.reset(op_data.opcode); goto EmitOp; } @@ -847,17 +884,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseOpImm: { - const InstDB::EncodingData::BaseOpImm& opData = InstDB::EncodingData::baseOpImm[encodingIndex]; + const InstDB::EncodingData::BaseOpImm& op_data = InstDB::EncodingData::baseOpImm[encoding_index]; if (isign4 == ENC_OPS1(Imm)) { - uint64_t imm = o0.as().valueAs(); - uint32_t immMax = 1u << opData.immBits; + uint64_t imm = o0.as().value_as(); + uint32_t immMax = 1u << op_data.imm_bits; if (imm >= immMax) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(imm, opData.immOffset); + opcode.reset(op_data.opcode); + opcode.add_imm(imm, op_data.imm_offset); goto EmitOp; } @@ -865,17 +902,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } 
case InstDB::kEncodingBaseR: { - const InstDB::EncodingData::BaseR& opData = InstDB::EncodingData::baseR[encodingIndex]; + const InstDB::EncodingData::BaseR& op_data = InstDB::EncodingData::baseR[encoding_index]; if (isign4 == ENC_OPS1(Reg)) { - if (!checkGpType(o0, opData.rType)) + if (!check_gp_type(o0, op_data.reg_type)) goto InvalidInstruction; - if (!checkGpId(o0, opData.rHiId)) + if (!check_gp_id(o0, op_data.reg_hi_id)) goto InvalidPhysId; - opcode.reset(opData.opcode); - opcode.addReg(o0, opData.rShift); + opcode.reset(op_data.opcode); + opcode.add_reg(o0, op_data.r_shift); goto EmitOp; } @@ -883,29 +920,29 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRR: { - const InstDB::EncodingData::BaseRR& opData = InstDB::EncodingData::baseRR[encodingIndex]; + const InstDB::EncodingData::BaseRR& op_data = InstDB::EncodingData::baseRR[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { uint32_t x; - if (!checkGpType(o0, opData.aType, &x)) + if (!check_gp_type(o0, op_data.a_type, &x)) goto InvalidInstruction; - if (!checkGpType(o1, opData.bType)) + if (!check_gp_type(o1, op_data.b_type)) goto InvalidInstruction; - if (opData.uniform && !checkSignature(o0, o1)) + if (op_data.uniform && !check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, opData.aHiId)) + if (!check_gp_id(o0, op_data.a_hi_id)) goto InvalidPhysId; - if (!checkGpId(o1, opData.bHiId)) + if (!check_gp_id(o1, op_data.b_hi_id)) goto InvalidPhysId; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addReg(o1, opData.bShift); - opcode.addReg(o0, opData.aShift); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_reg(o1, op_data.b_shift); + opcode.add_reg(o0, op_data.a_shift); goto EmitOp; } @@ -913,36 +950,36 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRRR: { - const InstDB::EncodingData::BaseRRR& opData = 
InstDB::EncodingData::baseRRR[encodingIndex]; + const InstDB::EncodingData::BaseRRR& op_data = InstDB::EncodingData::baseRRR[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { uint32_t x; - if (!checkGpType(o0, opData.aType, &x)) + if (!check_gp_type(o0, op_data.a_type, &x)) goto InvalidInstruction; - if (!checkGpType(o1, opData.bType)) + if (!check_gp_type(o1, op_data.b_type)) goto InvalidInstruction; - if (!checkGpType(o2, opData.cType)) + if (!check_gp_type(o2, op_data.c_type)) goto InvalidInstruction; - if (opData.uniform && !checkSignature(o0, o1, o2)) + if (op_data.uniform && !check_signature(o0, o1, o2)) goto InvalidInstruction; - if (!checkGpId(o0, opData.aHiId)) + if (!check_gp_id(o0, op_data.a_hi_id)) goto InvalidPhysId; - if (!checkGpId(o1, opData.bHiId)) + if (!check_gp_id(o1, op_data.b_hi_id)) goto InvalidPhysId; - if (!checkGpId(o2, opData.cHiId)) + if (!check_gp_id(o2, op_data.c_hi_id)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -950,43 +987,43 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRRRR: { - const InstDB::EncodingData::BaseRRRR& opData = InstDB::EncodingData::baseRRRR[encodingIndex]; + const InstDB::EncodingData::BaseRRRR& op_data = InstDB::EncodingData::baseRRRR[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) { uint32_t x; - if (!checkGpType(o0, opData.aType, &x)) + if (!check_gp_type(o0, op_data.a_type, &x)) goto InvalidInstruction; - if (!checkGpType(o1, opData.bType)) + if (!check_gp_type(o1, op_data.b_type)) goto InvalidInstruction; - if (!checkGpType(o2, opData.cType)) + if (!check_gp_type(o2, op_data.c_type)) goto InvalidInstruction; - if (!checkGpType(o3, opData.dType)) + if (!check_gp_type(o3, 
op_data.d_type)) goto InvalidInstruction; - if (opData.uniform && !checkSignature(o0, o1, o2, o3)) + if (op_data.uniform && !check_signature(o0, o1, o2, o3)) goto InvalidInstruction; - if (!checkGpId(o0, opData.aHiId)) + if (!check_gp_id(o0, op_data.a_hi_id)) goto InvalidPhysId; - if (!checkGpId(o1, opData.bHiId)) + if (!check_gp_id(o1, op_data.b_hi_id)) goto InvalidPhysId; - if (!checkGpId(o2, opData.cHiId)) + if (!check_gp_id(o2, op_data.c_hi_id)) goto InvalidPhysId; - if (!checkGpId(o3, opData.dHiId)) + if (!check_gp_id(o3, op_data.d_hi_id)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addReg(o3, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_reg(o3, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -994,37 +1031,37 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRRII: { - const InstDB::EncodingData::BaseRRII& opData = InstDB::EncodingData::baseRRII[encodingIndex]; + const InstDB::EncodingData::BaseRRII& op_data = InstDB::EncodingData::baseRRII[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { - if (!checkGpType(o0, opData.aType)) + if (!check_gp_type(o0, op_data.a_type)) goto InvalidInstruction; - if (!checkGpType(o1, opData.bType)) + if (!check_gp_type(o1, op_data.b_type)) goto InvalidInstruction; - if (!checkGpId(o0, opData.aHiId)) + if (!check_gp_id(o0, op_data.a_hi_id)) goto InvalidPhysId; - if (!checkGpId(o1, opData.bHiId)) + if (!check_gp_id(o1, op_data.b_hi_id)) goto InvalidPhysId; - if (o2.as().valueAs() >= Support::bitMask(opData.aImmSize + opData.aImmDiscardLsb) || - o3.as().valueAs() >= Support::bitMask(opData.bImmSize + opData.bImmDiscardLsb)) + if (o2.as().value_as() >= Support::bit_mask(op_data.a_imm_size + op_data.a_imm_discard_lsb) || + o3.as().value_as() >= 
Support::bit_mask(op_data.b_imm_size + op_data.b_imm_discard_lsb)) goto InvalidImmediate; - uint32_t aImm = o2.as().valueAs() >> opData.aImmDiscardLsb; - uint32_t bImm = o3.as().valueAs() >> opData.bImmDiscardLsb; + uint32_t a_imm = o2.as().value_as() >> op_data.a_imm_discard_lsb; + uint32_t b_imm = o3.as().value_as() >> op_data.b_imm_discard_lsb; - if ((aImm << opData.aImmDiscardLsb) != o2.as().valueAs() || - (bImm << opData.bImmDiscardLsb) != o3.as().valueAs()) + if ((a_imm << op_data.a_imm_discard_lsb) != o2.as().value_as() || + (b_imm << op_data.b_imm_discard_lsb) != o3.as().value_as()) goto InvalidImmediate; - opcode.reset(opData.opcode()); - opcode.addImm(aImm, opData.aImmOffset); - opcode.addImm(bImm, opData.bImmOffset); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(a_imm, op_data.a_imm_offset); + opcode.add_imm(b_imm, op_data.b_imm_offset); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1037,74 +1074,74 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseMov: { // MOV is a pseudo instruction that uses various instructions depending on its signature. - uint32_t x = diff(o0.as().type(), RegType::kARM_GpW); + uint32_t x = diff(o0.as().reg_type(), RegType::kGp32); if (x > 1) goto InvalidInstruction; if (isign4 == ENC_OPS2(Reg, Reg)) { - if (!o0.as().isGp()) + if (!o0.as().is_gp()) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - bool hasSP = o0.as().isSP() || o1.as().isSP(); - if (hasSP) { + bool has_sp = o0.as().is_sp() || o1.as().is_sp(); + if (has_sp) { // Cannot be combined with ZR. - if (!checkGpId(o0, o1, kSP)) + if (!check_gp_id(o0, o1, kSP)) goto InvalidPhysId; // MOV Rd, Rm -> ADD Rd, Rn, #0. 
opcode.reset(0b00010001000000000000000000000000); - opcode.addImm(x, 31); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } else { - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; // MOV Rd, Rm -> ORR Rd, , Rm. opcode.reset(0b00101010000000000000001111100000); - opcode.addImm(x, 31); - opcode.addReg(o1, 16); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 16); + opcode.add_reg(o0, 0); goto EmitOp; } } if (isign4 == ENC_OPS2(Reg, Imm)) { - if (!o0.as().isGp()) + if (!o0.as().is_gp()) goto InvalidInstruction; - uint64_t immValue = o1.as().valueAs(); + uint64_t imm_value = o1.as().value_as(); if (!x) - immValue &= 0xFFFFFFFFu; + imm_value &= 0xFFFFFFFFu; // Prefer a single MOVN/MOVZ instruction over a logical instruction. - multipleOpCount = encodeMovSequence64(multipleOpData, immValue, o0.id() & 31, x); - if (multipleOpCount == 1 && !o0.as().isSP()) { - opcode.reset(multipleOpData[0]); + multiple_op_count = encode_mov_sequence_64(multiple_op_data, imm_value, o0.id() & 31, x); + if (multiple_op_count == 1 && !o0.as().is_sp()) { + opcode.reset(multiple_op_data[0]); goto EmitOp; } // Logical instructions use 13-bit immediate pattern encoded as N:ImmR:ImmS. - LogicalImm logicalImm; - if (!o0.as().isZR()) { - if (Utils::encodeLogicalImm(immValue, x ? 64 : 32, &logicalImm)) { - if (!checkGpId(o0, kSP)) + LogicalImm logical_imm; + if (!o0.as().is_zr()) { + if (Utils::encode_logical_imm(imm_value, x ? 
64 : 32, Out(logical_imm))) { + if (!check_gp_id(o0, kSP)) goto InvalidPhysId; opcode.reset(0b00110010000000000000001111100000); - opcode.addImm(x, 31); - opcode.addLogicalImm(logicalImm); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_logical_imm(logical_imm); + opcode.add_reg(o0, 0); goto EmitOp; } } - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; goto EmitOp_Multiple; @@ -1114,34 +1151,34 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseMovKNZ: { - const InstDB::EncodingData::BaseMovKNZ& opData = InstDB::EncodingData::baseMovKNZ[encodingIndex]; + const InstDB::EncodingData::BaseMovKNZ& op_data = InstDB::EncodingData::baseMovKNZ[encoding_index]; - uint32_t x = diff(o0.as().type(), RegType::kARM_GpW); + uint32_t x = diff(o0.as().reg_type(), RegType::kGp32); if (x > 1) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); if (isign4 == ENC_OPS2(Reg, Imm)) { - uint64_t imm16 = o1.as().valueAs(); + uint64_t imm16 = o1.as().value_as(); if (imm16 > 0xFFFFu) goto InvalidImmediate; - opcode.addImm(imm16, 5); - opcode.addReg(o0, 0); + opcode.add_imm(imm16, 5); + opcode.add_reg(o0, 0); goto EmitOp; } if (isign4 == ENC_OPS3(Reg, Imm, Imm)) { - uint64_t imm16 = o1.as().valueAs(); - uint32_t shiftType = o2.as().predicate(); - uint64_t shiftValue = o2.as().valueAs(); + uint64_t imm16 = o1.as().value_as(); + uint32_t shift_type = o2.as().predicate(); + uint64_t shiftValue = o2.as().value_as(); - if (imm16 > 0xFFFFu || shiftValue > 48 || shiftType != uint32_t(ShiftOp::kLSL)) + if (imm16 > 0xFFFFu || shiftValue > 48 || shift_type != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; // Convert shift value to 'hw' field. 
@@ -1149,9 +1186,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co if ((hw << 4) != uint32_t(shiftValue)) goto InvalidImmediate; - opcode.addImm(hw, 21); - opcode.addImm(imm16, 5); - opcode.addReg(o0, 0); + opcode.add_imm(hw, 21); + opcode.add_imm(imm16, 5); + opcode.add_reg(o0, 0); if (!x && hw > 1u) goto InvalidImmediate; @@ -1167,23 +1204,23 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseAdr: { - const InstDB::EncodingData::BaseAdr& opData = InstDB::EncodingData::baseAdr[encodingIndex]; + const InstDB::EncodingData::BaseAdr& op_data = InstDB::EncodingData::baseAdr[encoding_index]; if (isign4 == ENC_OPS2(Reg, Label) || isign4 == ENC_OPS2(Reg, Imm)) { - if (!o0.as().isGpX()) + if (!o0.as().is_gp64()) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addReg(o0, 0); - offsetFormat.resetToImmValue(opData.offsetType, 4, 5, 21, 0); + opcode.reset(op_data.opcode()); + opcode.add_reg(o0, 0); + offset_format.reset_to_imm_value(op_data.offset_type, 4, 5, 21, 0); - if (instId == Inst::kIdAdrp) - offsetFormat._immDiscardLsb = 12; + if (inst_id == Inst::kIdAdrp) + offset_format._imm_discard_lsb = 12; - rmRel = &o1; + rm_rel = &o1; goto EmitOp_Rel; } @@ -1195,25 +1232,25 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseAddSub: { - const InstDB::EncodingData::BaseAddSub& opData = InstDB::EncodingData::baseAddSub[encodingIndex]; + const InstDB::EncodingData::BaseAddSub& op_data = InstDB::EncodingData::baseAddSub[encoding_index]; uint32_t x; - if (!checkGpType(o0, o1, kWX, &x)) + if (!check_gp_type(o0, o1, kWX, &x)) goto InvalidInstruction; if (isign4 == ENC_OPS3(Reg, Reg, Imm) || 
isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { - opcode.reset(uint32_t(opData.immediateOp) << 24); + opcode.reset(uint32_t(op_data.immediate_op) << 24); // ADD | SUB (immediate) - ZR is not allowed. // ADDS|SUBS (immediate) - ZR allowed in Rd, SP allowed in Rn. - uint32_t aHiId = opcode.get() & B(29) ? kZR : kSP; - uint32_t bHiId = kSP; + uint32_t a_hi_id = opcode.get() & B(29) ? kZR : kSP; + uint32_t b_hi_id = kSP; - if (!checkGpId(o0, aHiId) || !checkGpId(o1, bHiId)) + if (!check_gp_id(o0, a_hi_id) || !check_gp_id(o1, b_hi_id)) goto InvalidPhysId; // ADD|SUB (immediate) use 12-bit immediate optionally shifted by 'LSL #12'. - uint64_t imm = o2.as().valueAs(); + uint64_t imm = o2.as().value_as(); uint32_t shift = 0; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { @@ -1234,91 +1271,91 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co imm >>= 12; } - opcode.addImm(x, 31); - opcode.addImm(shift, 22); - opcode.addImm(imm, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_imm(shift, 22); + opcode.add_imm(imm, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } if (isign4 == ENC_OPS3(Reg, Reg, Reg) || isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - uint32_t opSize = x ? 64 : 32; + uint32_t op_size = x ? 64 : 32; uint64_t shift = 0; - uint32_t sType = uint32_t(ShiftOp::kLSL); + uint32_t shift_type = uint32_t(ShiftOp::kLSL); if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - sType = o3.as().predicate(); - shift = o3.as().valueAs(); + shift_type = o3.as().predicate(); + shift = o3.as().value_as(); } - if (!checkGpId(o2, kZR)) + if (!check_gp_id(o2, kZR)) goto InvalidPhysId; // Shift operation - LSL, LSR, ASR. 
- if (sType <= uint32_t(ShiftOp::kASR)) { - bool hasSP = o0.as().isSP() || o1.as().isSP(); - if (!hasSP) { - if (!checkSignature(o1, o2)) { + if (shift_type <= uint32_t(ShiftOp::kASR)) { + bool has_sp = o0.as().is_sp() || o1.as().is_sp(); + if (!has_sp) { + if (!check_signature(o1, o2)) { goto InvalidInstruction; } - if (!checkGpId(o0, o1, kZR)) { + if (!check_gp_id(o0, o1, kZR)) { goto InvalidPhysId; } - if (shift >= opSize) { + if (shift >= op_size) { goto InvalidImmediate; } - opcode.reset(uint32_t(opData.shiftedOp) << 21); - opcode.addImm(x, 31); - opcode.addImm(sType, 22); - opcode.addReg(o2, 16); - opcode.addImm(shift, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(uint32_t(op_data.shifted_op) << 21); + opcode.add_imm(x, 31); + opcode.add_imm(shift_type, 22); + opcode.add_reg(o2, 16); + opcode.add_imm(shift, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } // SP register can only be used with LSL or Extend. - if (sType != uint32_t(ShiftOp::kLSL)) { + if (shift_type != uint32_t(ShiftOp::kLSL)) { goto InvalidImmediate; } - sType = x ? uint32_t(ShiftOp::kUXTX) : uint32_t(ShiftOp::kUXTW); + shift_type = x ? uint32_t(ShiftOp::kUXTX) : uint32_t(ShiftOp::kUXTW); } // Extend operation - UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX. - opcode.reset(uint32_t(opData.extendedOp) << 21); - sType -= uint32_t(ShiftOp::kUXTB); + opcode.reset(uint32_t(op_data.extended_op) << 21); + shift_type -= uint32_t(ShiftOp::kUXTB); - if (sType > 7 || shift > 4) { + if (shift_type > 7 || shift > 4) { goto InvalidImmediate; } if (!(opcode.get() & B(29))) { // ADD|SUB (extend) - ZR is not allowed. - if (!checkGpId(o0, o1, kSP)) + if (!check_gp_id(o0, o1, kSP)) goto InvalidPhysId; } else { // ADDS|SUBS (extend) - ZR allowed in Rd, SP allowed in Rn. - if (!checkGpId(o0, kZR) || !checkGpId(o1, kSP)) + if (!check_gp_id(o0, kZR) || !check_gp_id(o1, kSP)) goto InvalidPhysId; } // Validate whether the register operands match extend option. 
- if (o2.as().type() != extendOptionToRegType(sType) || o1.as().type() < o2.as().type()) { + if (o2.as().reg_type() != extend_option_to_reg_type(shift_type) || o1.as().reg_type() < o2.as().reg_type()) { goto InvalidInstruction; } - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addImm(sType, 13); - opcode.addImm(shift, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_imm(shift_type, 13); + opcode.add_imm(shift, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1326,78 +1363,78 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseLogical: { - const InstDB::EncodingData::BaseLogical& opData = InstDB::EncodingData::baseLogical[encodingIndex]; + const InstDB::EncodingData::BaseLogical& op_data = InstDB::EncodingData::baseLogical[encoding_index]; uint32_t x; - if (!checkGpType(o0, o1, kWX, &x)) + if (!check_gp_type(o0, o1, kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - uint32_t opSize = x ? 64 : 32; + uint32_t op_size = x ? 64 : 32; - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.immediateOp != 0) { - opcode.reset(uint32_t(opData.immediateOp) << 23); + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.immediate_op != 0) { + opcode.reset(uint32_t(op_data.immediate_op) << 23); // AND|ANDS|BIC|BICS|ORR|EOR (immediate) uses a LogicalImm format described by N:R:S values. - uint64_t immMask = Support::lsbMask(opSize); - uint64_t immValue = o2.as().valueAs(); + uint64_t imm_mask = Support::lsb_mask(op_size); + uint64_t imm_value = o2.as().value_as(); - if (opData.negateImm) - immValue ^= immMask; + if (op_data.negate_imm) + imm_value ^= imm_mask; // Logical instructions use 13-bit immediate pattern encoded as N:ImmS:ImmR. 
- LogicalImm logicalImm; - if (!Utils::encodeLogicalImm(immValue & immMask, opSize, &logicalImm)) + LogicalImm logical_imm; + if (!Utils::encode_logical_imm(imm_value & imm_mask, op_size, Out(logical_imm))) goto InvalidImmediate; // AND|BIC|ORR|EOR (immediate) can have SP on destination, but ANDS|BICS (immediate) cannot. uint32_t kOpANDS = 0x3 << 29; bool isANDS = (opcode.get() & kOpANDS) == kOpANDS; - if (!checkGpId(o0, isANDS ? kZR : kSP) || !checkGpId(o1, kZR)) + if (!check_gp_id(o0, isANDS ? kZR : kSP) || !check_gp_id(o1, kZR)) goto InvalidPhysId; - opcode.addImm(x, 31); - opcode.addLogicalImm(logicalImm); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_logical_imm(logical_imm); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } - if (!checkSignature(o1, o2)) + if (!check_signature(o1, o2)) goto InvalidInstruction; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!checkGpId(o0, o1, o2, kZR)) + if (!check_gp_id(o0, o1, o2, kZR)) goto InvalidPhysId; - opcode.reset(uint32_t(opData.shiftedOp) << 21); - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(uint32_t(op_data.shifted_op) << 21); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - if (!checkGpId(o0, o1, o2, kZR)) + if (!check_gp_id(o0, o1, o2, kZR)) goto InvalidPhysId; - uint32_t shiftType = o3.as().predicate(); - uint64_t opShift = o3.as().valueAs(); + uint32_t shift_type = o3.as().predicate(); + uint64_t op_shift = o3.as().value_as(); - if (shiftType > 0x3 || opShift >= opSize) + if (shift_type > 0x3 || op_shift >= op_size) goto InvalidImmediate; - opcode.reset(uint32_t(opData.shiftedOp) << 21); - opcode.addImm(x, 31); - opcode.addImm(shiftType, 22); - opcode.addReg(o2, 16); - opcode.addImm(opShift, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + 
opcode.reset(uint32_t(op_data.shifted_op) << 21); + opcode.add_imm(x, 31); + opcode.add_imm(shift_type, 22); + opcode.add_reg(o2, 16); + opcode.add_imm(op_shift, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1405,96 +1442,96 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseCmpCmn: { - const InstDB::EncodingData::BaseCmpCmn& opData = InstDB::EncodingData::baseCmpCmn[encodingIndex]; + const InstDB::EncodingData::BaseCmpCmn& op_data = InstDB::EncodingData::baseCmpCmn[encoding_index]; uint32_t x; - if (!checkGpType(o0, kWX, &x)) + if (!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; if (isign4 == ENC_OPS2(Reg, Imm)) { // CMN|CMP (immediate) - ZR is not allowed. - if (!checkGpId(o0, kSP)) + if (!check_gp_id(o0, kSP)) goto InvalidPhysId; // CMN|CMP (immediate) use 12-bit immediate optionally shifted by 'LSL #12'. const Imm& imm12 = o1.as(); - uint32_t immShift = 0; - uint64_t immValue = imm12.valueAs(); + uint32_t imm_shift = 0; + uint64_t imm_value = imm12.value_as(); - if (immValue > 0xFFFu) { - if ((immValue & ~uint64_t(0xFFFu << 12)) != 0) + if (imm_value > 0xFFFu) { + if ((imm_value & ~uint64_t(0xFFFu << 12)) != 0) goto InvalidImmediate; - immShift = 1; - immValue >>= 12; + imm_shift = 1; + imm_value >>= 12; } - opcode.reset(uint32_t(opData.immediateOp) << 24); - opcode.addImm(x, 31); - opcode.addImm(immShift, 22); - opcode.addImm(immValue, 10); - opcode.addReg(o0, 5); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(uint32_t(op_data.immediate_op) << 24); + opcode.add_imm(x, 31); + opcode.add_imm(imm_shift, 22); + opcode.add_imm(imm_value, 10); + opcode.add_reg(o0, 5); + opcode.add_reg(Gp::kIdZr, 0); goto EmitOp; } if (isign4 == ENC_OPS2(Reg, Reg) || isign4 == ENC_OPS3(Reg, Reg, Imm)) { - uint32_t opSize = x ? 64 : 32; - uint32_t sType = 0; - uint64_t shift = 0; + uint32_t op_size = x ? 
64 : 32; + uint32_t shift_type = 0; + uint64_t shift_value = 0; if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { - sType = o2.as().predicate(); - shift = o2.as().valueAs(); + shift_type = o2.as().predicate(); + shift_value = o2.as().value_as(); } - bool hasSP = o0.as().isSP() || o1.as().isSP(); + bool has_sp = o0.as().is_sp() || o1.as().is_sp(); // Shift operation - LSL, LSR, ASR. - if (sType <= uint32_t(ShiftOp::kASR)) { - if (!hasSP) { - if (!checkSignature(o0, o1)) { + if (shift_type <= uint32_t(ShiftOp::kASR)) { + if (!has_sp) { + if (!check_signature(o0, o1)) { goto InvalidInstruction; } - if (shift >= opSize) { + if (shift_value >= op_size) { goto InvalidImmediate; } - opcode.reset(uint32_t(opData.shiftedOp) << 21); - opcode.addImm(x, 31); - opcode.addImm(sType, 22); - opcode.addReg(o1, 16); - opcode.addImm(shift, 10); - opcode.addReg(o0, 5); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(uint32_t(op_data.shifted_op) << 21); + opcode.add_imm(x, 31); + opcode.add_imm(shift_type, 22); + opcode.add_reg(o1, 16); + opcode.add_imm(shift_value, 10); + opcode.add_reg(o0, 5); + opcode.add_reg(Gp::kIdZr, 0); goto EmitOp; } // SP register can only be used with LSL or Extend. - if (sType != uint32_t(ShiftOp::kLSL)) + if (shift_type != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; - sType = x ? uint32_t(ShiftOp::kUXTX) : uint32_t(ShiftOp::kUXTW); + shift_type = x ? uint32_t(ShiftOp::kUXTX) : uint32_t(ShiftOp::kUXTW); } // Extend operation - UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX. - sType -= uint32_t(ShiftOp::kUXTB); - if (sType > 7 || shift > 4) { + shift_type -= uint32_t(ShiftOp::kUXTB); + if (shift_type > 7 || shift_value > 4) { goto InvalidImmediate; } // Validate whether the register operands match extend option. 
- if (o1.as().type() != extendOptionToRegType(sType) || o0.as().type() < o1.as().type()) { + if (o1.as().reg_type() != extend_option_to_reg_type(shift_type) || o0.as().reg_type() < o1.as().reg_type()) { goto InvalidInstruction; } - opcode.reset(uint32_t(opData.extendedOp) << 21); - opcode.addImm(x, 31); - opcode.addReg(o1, 16); - opcode.addImm(sType, 13); - opcode.addImm(shift, 10); - opcode.addReg(o0, 5); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(uint32_t(op_data.extended_op) << 21); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 16); + opcode.add_imm(shift_type, 13); + opcode.add_imm(shift_value, 10); + opcode.add_reg(o0, 5); + opcode.add_reg(Gp::kIdZr, 0); goto EmitOp; } @@ -1502,37 +1539,37 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseMvnNeg: { - const InstDB::EncodingData::BaseMvnNeg& opData = InstDB::EncodingData::baseMvnNeg[encodingIndex]; + const InstDB::EncodingData::BaseMvnNeg& op_data = InstDB::EncodingData::baseMvnNeg[encoding_index]; uint32_t x; - if (!checkGpType(o0, o1, kWX, &x)) + if (!check_gp_type(o0, o1, kWX, &x)) goto InvalidInstruction; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addReg(o1, 16); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 16); + opcode.add_reg(o0, 0); if (isign4 == ENC_OPS2(Reg, Reg)) { - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; goto EmitOp; } if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - uint32_t opSize = x ? 64 : 32; - uint32_t shiftType = o2.as().predicate(); - uint64_t opShift = o2.as().valueAs(); + uint32_t op_size = x ? 
64 : 32; + uint32_t shift_type = o2.as().predicate(); + uint64_t shift_value = o2.as().value_as(); - if (shiftType > uint32_t(ShiftOp::kROR) || opShift >= opSize) + if (shift_type > uint32_t(ShiftOp::kROR) || shift_value >= op_size) goto InvalidImmediate; - opcode.addImm(shiftType, 22); - opcode.addImm(opShift, 10); + opcode.add_imm(shift_type, 22); + opcode.add_imm(shift_value, 10); goto EmitOp; } @@ -1540,60 +1577,60 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseTst: { - const InstDB::EncodingData::BaseTst& opData = InstDB::EncodingData::baseTst[encodingIndex]; + const InstDB::EncodingData::BaseTst& op_data = InstDB::EncodingData::baseTst[encoding_index]; uint32_t x; - if (!checkGpType(o0, kWX, &x)) + if (!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; - uint32_t opSize = x ? 64 : 32; + uint32_t op_size = x ? 64 : 32; - if (isign4 == ENC_OPS2(Reg, Imm) && opData.immediateOp != 0) { - if (!checkGpId(o0, kZR)) + if (isign4 == ENC_OPS2(Reg, Imm) && op_data.immediate_op != 0) { + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; // TST (immediate) uses a LogicalImm format described by N:R:S values. - uint64_t immMask = Support::lsbMask(opSize); - uint64_t immValue = o1.as().valueAs(); + uint64_t imm_mask = Support::lsb_mask(op_size); + uint64_t imm_value = o1.as().value_as(); // Logical instructions use 13-bit immediate pattern encoded as N:ImmS:ImmR. 
- LogicalImm logicalImm; - if (!Utils::encodeLogicalImm(immValue & immMask, opSize, &logicalImm)) + LogicalImm logical_imm; + if (!Utils::encode_logical_imm(imm_value & imm_mask, op_size, Out(logical_imm))) goto InvalidImmediate; - opcode.reset(uint32_t(opData.immediateOp) << 22); - opcode.addLogicalImm(logicalImm); - opcode.addImm(x, 31); - opcode.addReg(o0, 5); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(uint32_t(op_data.immediate_op) << 22); + opcode.add_logical_imm(logical_imm); + opcode.add_imm(x, 31); + opcode.add_reg(o0, 5); + opcode.add_reg(Gp::kIdZr, 0); goto EmitOp; } - opcode.reset(uint32_t(opData.shiftedOp) << 21); - opcode.addImm(x, 31); - opcode.addReg(o1, 16); - opcode.addReg(o0, 5); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(uint32_t(op_data.shifted_op) << 21); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 16); + opcode.add_reg(o0, 5); + opcode.add_reg(Gp::kIdZr, 0); if (isign4 == ENC_OPS2(Reg, Reg)) { - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; goto EmitOp; } if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - uint32_t shiftType = o2.as().predicate(); - uint64_t opShift = o2.as().valueAs(); + uint32_t shift_type = o2.as().predicate(); + uint64_t op_shift = o2.as().value_as(); - if (shiftType > 0x3 || opShift >= opSize) + if (shift_type > 0x3 || op_shift >= op_size) goto InvalidImmediate; - opcode.addImm(shiftType, 22); - opcode.addImm(opShift, 10); + opcode.add_imm(shift_type, 22); + opcode.add_imm(op_shift, 10); goto EmitOp; } @@ -1605,32 +1642,32 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseBfc: { - const InstDB::EncodingData::BaseBfc& opData = InstDB::EncodingData::baseBfc[encodingIndex]; + const InstDB::EncodingData::BaseBfc& op_data = InstDB::EncodingData::baseBfc[encoding_index]; if 
(isign4 == ENC_OPS3(Reg, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0)) + if (!check_gp_id(o0)) goto InvalidPhysId; - uint64_t lsb = o1.as().valueAs(); - uint64_t width = o2.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t lsb = o1.as().value_as(); + uint64_t width = o2.as().value_as(); + uint32_t op_size = x ? 64 : 32; - if (lsb >= opSize || width == 0 || width > opSize) + if (lsb >= op_size || width == 0 || width > op_size) goto InvalidImmediate; - uint32_t lsb32 = Support::neg(uint32_t(lsb)) & (opSize - 1); + uint32_t lsb32 = Support::neg(uint32_t(lsb)) & (op_size - 1); uint32_t width32 = uint32_t(width) - 1; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addImm(lsb32, 16); - opcode.addImm(width32, 10); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_imm(lsb32, 16); + opcode.add_imm(width32, 10); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1638,36 +1675,36 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBfi: { - const InstDB::EncodingData::BaseBfi& opData = InstDB::EncodingData::baseBfi[encodingIndex]; + const InstDB::EncodingData::BaseBfi& op_data = InstDB::EncodingData::baseBfi[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1)) + if (!check_gp_id(o0, o1)) goto InvalidPhysId; - uint64_t lsb = o2.as().valueAs(); - uint64_t width = o3.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t lsb = o2.as().value_as(); + uint64_t width = o3.as().value_as(); + uint32_t op_size = x ? 
64 : 32; - if (lsb >= opSize || width == 0 || width > opSize) + if (lsb >= op_size || width == 0 || width > op_size) goto InvalidImmediate; - uint32_t lImm = Support::neg(uint32_t(lsb)) & (opSize - 1); - uint32_t wImm = uint32_t(width) - 1; + uint32_t imm_l = Support::neg(uint32_t(lsb)) & (op_size - 1); + uint32_t imm_w = uint32_t(width) - 1; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addImm(lImm, 16); - opcode.addImm(wImm, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_imm(imm_l, 16); + opcode.add_imm(imm_w, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1675,33 +1712,33 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBfm: { - const InstDB::EncodingData::BaseBfm& opData = InstDB::EncodingData::baseBfm[encodingIndex]; + const InstDB::EncodingData::BaseBfm& op_data = InstDB::EncodingData::baseBfm[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1)) + if (!check_gp_id(o0, o1)) goto InvalidPhysId; - uint64_t immR = o2.as().valueAs(); - uint64_t immS = o3.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t imm_r = o2.as().value_as(); + uint64_t imm_s = o3.as().value_as(); + uint32_t op_size = x ? 
64 : 32; - if ((immR | immS) >= opSize) + if ((imm_r | imm_s) >= op_size) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addImm(immR, 16); - opcode.addImm(immS, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_imm(imm_r, 16); + opcode.add_imm(imm_s, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1709,39 +1746,39 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBfx: { - const InstDB::EncodingData::BaseBfx& opData = InstDB::EncodingData::baseBfx[encodingIndex]; + const InstDB::EncodingData::BaseBfx& op_data = InstDB::EncodingData::baseBfx[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1)) + if (!check_gp_id(o0, o1)) goto InvalidPhysId; - uint64_t lsb = o2.as().valueAs(); - uint64_t width = o3.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t lsb = o2.as().value_as(); + uint64_t width = o3.as().value_as(); + uint32_t op_size = x ? 
64 : 32; - if (lsb >= opSize || width == 0 || width > opSize) + if (lsb >= op_size || width == 0 || width > op_size) goto InvalidImmediate; uint32_t lsb32 = uint32_t(lsb); uint32_t width32 = lsb32 + uint32_t(width) - 1u; - if (width32 >= opSize) + if (width32 >= op_size) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addImm(lsb32, 16); - opcode.addImm(width32, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_imm(lsb32, 16); + opcode.add_imm(width32, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1749,24 +1786,24 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseExtend: { - const InstDB::EncodingData::BaseExtend& opData = InstDB::EncodingData::baseExtend[encodingIndex]; + const InstDB::EncodingData::BaseExtend& op_data = InstDB::EncodingData::baseExtend[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!o1.as().isGpW()) + if (!o1.as().is_gp32()) goto InvalidInstruction; - if (!checkGpId(o0, o1)) + if (!check_gp_id(o0, o1)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1774,32 +1811,32 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseExtract: { - const InstDB::EncodingData::BaseExtract& opData = InstDB::EncodingData::baseExtract[encodingIndex]; + const InstDB::EncodingData::BaseExtract& op_data = InstDB::EncodingData::baseExtract[encoding_index]; if (isign4 == ENC_OPS4(Reg, 
Reg, Reg, Imm)) { uint32_t x; - if (!checkGpType(o0, kWX, &x)) + if (!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1, o2)) + if (!check_signature(o0, o1, o2)) goto InvalidInstruction; - if (!checkGpId(o0, o1, o2)) + if (!check_gp_id(o0, o1, o2)) goto InvalidPhysId; - uint64_t lsb = o3.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t lsb = o3.as().value_as(); + uint32_t op_size = x ? 64 : 32; - if (lsb >= opSize) + if (lsb >= op_size) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addReg(o2, 16); - opcode.addImm(lsb, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_reg(o2, 16); + opcode.add_imm(lsb, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1809,20 +1846,20 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseRev: { if (isign4 == ENC_OPS2(Reg, Reg)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1)) + if (!check_gp_id(o0, o1)) goto InvalidPhysId; opcode.reset(0b01011010110000000000100000000000); - opcode.addImm(x, 31); - opcode.addImm(x, 10); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.add_imm(x, 31); + opcode.add_imm(x, 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1830,66 +1867,66 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseShift: { - const InstDB::EncodingData::BaseShift& opData = InstDB::EncodingData::baseShift[encodingIndex]; + const InstDB::EncodingData::BaseShift& op_data = InstDB::EncodingData::baseShift[encoding_index]; uint32_t x; - if (!checkGpType(o0, kWX, &x)) + if 
(!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!checkSignature(o0, o1, o2)) + if (!check_signature(o0, o1, o2)) goto InvalidInstruction; - if (!checkGpId(o0, o1, o2, kZR)) + if (!check_gp_id(o0, o1, o2, kZR)) goto InvalidPhysId; - opcode.reset(opData.registerOp()); - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.register_op()); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.immediateOp()) { - if (!checkSignature(o0, o1)) + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.immediate_op()) { + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - uint64_t immR = o2.as().valueAs(); - uint32_t opSize = x ? 64 : 32; + uint64_t imm_r = o2.as().value_as(); + uint32_t op_size = x ? 64 : 32; - if (immR >= opSize) + if (imm_r >= op_size) goto InvalidImmediate; - opcode.reset(opData.immediateOp()); - opcode.addImm(x, 31); - opcode.addImm(x, 22); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.immediate_op()); + opcode.add_imm(x, 31); + opcode.add_imm(x, 22); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); if (opcode.get() & B(10)) { // ASR and LSR (immediate) has the same logic. 
- opcode.addImm(x, 15); - opcode.addImm(immR, 16); + opcode.add_imm(x, 15); + opcode.add_imm(imm_r, 16); goto EmitOp; } - if (opData.ror == 0) { + if (op_data.ror == 0) { // LSL (immediate) is an alias to UBFM - uint32_t ubfmImmR = Support::neg(uint32_t(immR)) & (opSize - 1); - uint32_t ubfmImmS = opSize - 1 - uint32_t(immR); + uint32_t ubfm_imm_r = Support::neg(uint32_t(imm_r)) & (op_size - 1); + uint32_t ubfm_imm_s = op_size - 1 - uint32_t(imm_r); - opcode.addImm(ubfmImmR, 16); - opcode.addImm(ubfmImmS, 10); + opcode.add_imm(ubfm_imm_r, 16); + opcode.add_imm(ubfm_imm_s, 10); goto EmitOp; } else { // ROR (immediate) is an alias to EXTR. - opcode.addImm(immR, 10); - opcode.addReg(o1, 16); + opcode.add_imm(imm_r, 10); + opcode.add_reg(o1, 16); goto EmitOp; } } @@ -1902,48 +1939,48 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseCCmp: { - const InstDB::EncodingData::BaseCCmp& opData = InstDB::EncodingData::baseCCmp[encodingIndex]; + const InstDB::EncodingData::BaseCCmp& op_data = InstDB::EncodingData::baseCCmp[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm) || isign4 == ENC_OPS4(Reg, Imm, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - uint64_t nzcv = o2.as().valueAs(); - uint64_t cond = o3.as().valueAs(); + uint64_t nzcv = o2.as().value_as(); + uint64_t cond = o3.as().value_as(); if ((nzcv | cond) > 0xFu) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)), 12); - opcode.addImm(nzcv, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)), 12); + opcode.add_imm(nzcv, 0); if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { 
// CCMN|CCMP (register) form. - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o1, kZR)) + if (!check_gp_id(o1, kZR)) goto InvalidPhysId; - opcode.addReg(o1, 16); - opcode.addReg(o0, 5); + opcode.add_reg(o1, 16); + opcode.add_reg(o0, 5); goto EmitOp; } else { // CCMN|CCMP (immediate) form. - uint64_t imm5 = o1.as().valueAs(); + uint64_t imm5 = o1.as().value_as(); if (imm5 > 0x1F) goto InvalidImmediate; - opcode.addImm(1, 11); - opcode.addImm(imm5, 16); - opcode.addReg(o0, 5); + opcode.add_imm(1, 11); + opcode.add_imm(imm5, 16); + opcode.add_reg(o0, 5); goto EmitOp; } } @@ -1952,26 +1989,26 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseCInc: { - const InstDB::EncodingData::BaseCInc& opData = InstDB::EncodingData::baseCInc[encodingIndex]; + const InstDB::EncodingData::BaseCInc& op_data = InstDB::EncodingData::baseCInc[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { uint32_t x; - if (!checkGpType(o0, o1, InstDB::kWX, &x)) + if (!check_gp_type(o0, o1, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - uint64_t cond = o2.as().valueAs(); + uint64_t cond = o2.as().value_as(); if (cond - 2u > 0xEu) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addReg(o1, 16); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)) ^ 1u, 12); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_reg(o1, 16); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)) ^ 1u, 12); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -1979,26 +2016,26 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseCSel: { - const InstDB::EncodingData::BaseCSel& opData = InstDB::EncodingData::baseCSel[encodingIndex]; + const 
InstDB::EncodingData::BaseCSel& op_data = InstDB::EncodingData::baseCSel[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { uint32_t x; - if (!checkGpType(o0, o1, o2, InstDB::kWX, &x)) + if (!check_gp_type(o0, o1, o2, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, o1, o2, kZR)) + if (!check_gp_id(o0, o1, o2, kZR)) goto InvalidPhysId; - uint64_t cond = o3.as().valueAs(); + uint64_t cond = o3.as().value_as(); if (cond > 0xFu) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addReg(o2, 16); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)), 12); - opcode.addReg(o1, 5); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)), 12); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -2006,24 +2043,81 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseCSet: { - const InstDB::EncodingData::BaseCSet& opData = InstDB::EncodingData::baseCSet[encodingIndex]; + const InstDB::EncodingData::BaseCSet& op_data = InstDB::EncodingData::baseCSet[encoding_index]; if (isign4 == ENC_OPS2(Reg, Imm)) { uint32_t x; - if (!checkGpType(o0, InstDB::kWX, &x)) + if (!check_gp_type(o0, InstDB::kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - uint64_t cond = o1.as().valueAs(); + uint64_t cond = o1.as().value_as(); if (cond - 2u >= 0xEu) goto InvalidImmediate; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)) ^ 1u, 12); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)) ^ 1u, 12); + opcode.add_reg(o0, 0); + goto EmitOp; + } + + break; + } + + // ------------------------------------------------------------------------ + // [Base - Min/Max] 
+ // ------------------------------------------------------------------------ + + case InstDB::kEncodingBaseMinMax: { + const InstDB::EncodingData::BaseMinMax& op_data = InstDB::EncodingData::baseMinMax[encoding_index]; + + if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { + uint32_t x; + if (!check_gp_type(o0, InstDB::kWX, &x)) + goto InvalidInstruction; + + if (!check_signature(o0, o1, o2)) + goto InvalidInstruction; + + opcode.reset(op_data.register_op); + opcode.add_imm(x, 31); + opcode.add_reg(o2, 16); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); + goto EmitOp; + } + + if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { + uint32_t x; + if (!check_gp_type(o0, InstDB::kWX, &x)) + goto InvalidInstruction; + + if (!check_signature(o0, o1)) + goto InvalidInstruction; + + uint64_t imm = o2.as().value_as(); + + if (op_data.immediate_op & (1u << 18)) { + // Zero extend imm. + if (!Support::is_uint_n<8>(imm)) { + goto InvalidImmediate; + } + } + else { + // Sign extend imm. + if (!Support::is_int_n<8>(int64_t(imm))) { + goto InvalidImmediate; + } + } + + opcode.reset(op_data.immediate_op); + opcode.add_imm(x, 31); + opcode.add_imm(uint32_t(imm & 0xFFu), 10); + opcode.add_reg(o1, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -2035,33 +2129,33 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseAtDcIcTlbi: { - const InstDB::EncodingData::BaseAtDcIcTlbi& opData = InstDB::EncodingData::baseAtDcIcTlbi[encodingIndex]; + const InstDB::EncodingData::BaseAtDcIcTlbi& op_data = InstDB::EncodingData::baseAtDcIcTlbi[encoding_index]; if (isign4 == ENC_OPS1(Imm) || isign4 == ENC_OPS2(Imm, Reg)) { - if (opData.mandatoryReg && isign4 != ENC_OPS2(Imm, Reg)) + if (op_data.mandatory_reg && isign4 != ENC_OPS2(Imm, Reg)) goto InvalidInstruction; - if (o0.as().valueAs() > 0x7FFFu) + if (o0.as().value_as() > 0x7FFFu) goto InvalidImmediate; - uint32_t imm = o0.as().valueAs(); 
- if ((imm & opData.immVerifyMask) != opData.immVerifyData) + uint32_t imm = o0.as().value_as(); + if ((imm & op_data.imm_verify_mask) != op_data.imm_verify_data) goto InvalidImmediate; uint32_t rt = 31; - if (o1.isReg()) { - if (!o1.as().isGpX()) + if (o1.is_reg()) { + if (!o1.as().is_gp64()) goto InvalidInstruction; - if (!checkGpId(o1, kZR)) + if (!check_gp_id(o1, kZR)) goto InvalidPhysId; rt = o1.id() & 31; } opcode.reset(0b11010101000010000000000000000000); - opcode.addImm(imm, 5); - opcode.addReg(rt, 0); + opcode.add_imm(imm, 5); + opcode.add_reg(rt, 0); goto EmitOp; } break; @@ -2069,22 +2163,22 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseMrs: { if (isign4 == ENC_OPS2(Reg, Imm)) { - if (!o0.as().isGpX()) + if (!o0.as().is_gp64()) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - if (o1.as().valueAs() > 0xFFFFu) + if (o1.as().value_as() > 0xFFFFu) goto InvalidImmediate; - uint32_t imm = o1.as().valueAs(); + uint32_t imm = o1.as().value_as(); if (!(imm & B(15))) goto InvalidImmediate; opcode.reset(0b11010101001100000000000000000000); - opcode.addImm(imm, 5); - opcode.addReg(o0, 0); + opcode.add_imm(imm, 5); + opcode.add_reg(o0, 0); goto EmitOp; } @@ -2093,42 +2187,42 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseMsr: { if (isign4 == ENC_OPS2(Imm, Reg)) { - if (!o1.as().isGpX()) + if (!o1.as().is_gp64()) goto InvalidInstruction; - if (o0.as().valueAs() > 0xFFFFu) + if (o0.as().value_as() > 0xFFFFu) goto InvalidImmediate; - uint32_t imm = o0.as().valueAs(); + uint32_t imm = o0.as().value_as(); if (!(imm & B(15))) goto InvalidImmediate; - if (!checkGpId(o1, kZR)) + if (!check_gp_id(o1, kZR)) goto InvalidPhysId; opcode.reset(0b11010101000100000000000000000000); - opcode.addImm(imm, 5); - opcode.addReg(o1, 0); + opcode.add_imm(imm, 5); + opcode.add_reg(o1, 0); goto EmitOp; } if (isign4 
== ENC_OPS2(Imm, Imm)) { - if (o0.as().valueAs() > 0x1Fu) + if (o0.as().value_as() > 0x1Fu) goto InvalidImmediate; - if (o1.as().valueAs() > 0xFu) + if (o1.as().value_as() > 0xFu) goto InvalidImmediate; - uint32_t op = o0.as().valueAs(); - uint32_t cRm = o1.as().valueAs(); + uint32_t op = o0.as().value_as(); + uint32_t crm = o1.as().value_as(); uint32_t op1 = uint32_t(op) >> 3; uint32_t op2 = uint32_t(op) & 0x7u; opcode.reset(0b11010101000000000100000000011111); - opcode.addImm(op1, 16); - opcode.addImm(cRm, 8); - opcode.addImm(op2, 5); + opcode.add_imm(op1, 16); + opcode.add_imm(crm, 8); + opcode.add_imm(op2, 5); goto EmitOp; } @@ -2137,38 +2231,38 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseSys: { if (isign4 == ENC_OPS4(Imm, Imm, Imm, Imm)) { - if (o0.as().valueAs() > 0x7u || - o1.as().valueAs() > 0xFu || - o2.as().valueAs() > 0xFu || - o3.as().valueAs() > 0x7u) + if (o0.as().value_as() > 0x7u || + o1.as().value_as() > 0xFu || + o2.as().value_as() > 0xFu || + o3.as().value_as() > 0x7u) goto InvalidImmediate; - uint32_t op1 = o0.as().valueAs(); - uint32_t cRn = o1.as().valueAs(); - uint32_t cRm = o2.as().valueAs(); - uint32_t op2 = o3.as().valueAs(); + uint32_t op1 = o0.as().value_as(); + uint32_t crn = o1.as().value_as(); + uint32_t crm = o2.as().value_as(); + uint32_t op2 = o3.as().value_as(); uint32_t rt = 31; - const Operand_& o4 = opExt[EmitterUtils::kOp4]; - if (o4.isReg()) { - if (!o4.as().isGpX()) + const Operand_& o4 = op_ext[EmitterUtils::kOp4]; + if (o4.is_reg()) { + if (!o4.as().is_gp64()) goto InvalidInstruction; - if (!checkGpId(o4, kZR)) + if (!check_gp_id(o4, kZR)) goto InvalidPhysId; rt = o4.id() & 31; } - else if (!o4.isNone()) { + else if (!o4.is_none()) { goto InvalidInstruction; } opcode.reset(0b11010101000010000000000000000000); - opcode.addImm(op1, 16); - opcode.addImm(cRn, 12); - opcode.addImm(cRm, 8); - opcode.addImm(op2, 5); - opcode.addImm(rt, 0); + opcode.add_imm(op1, 
16); + opcode.add_imm(crn, 12); + opcode.add_imm(crm, 8); + opcode.add_imm(op2, 5); + opcode.add_imm(rt, 0); goto EmitOp; } @@ -2180,17 +2274,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseBranchReg: { - const InstDB::EncodingData::BaseBranchReg& opData = InstDB::EncodingData::baseBranchReg[encodingIndex]; + const InstDB::EncodingData::BaseBranchReg& op_data = InstDB::EncodingData::baseBranchReg[encoding_index]; if (isign4 == ENC_OPS1(Reg)) { - if (!o0.as().isGpX()) + if (!o0.as().is_gp64()) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode); - opcode.addReg(o0, 5); + opcode.reset(op_data.opcode); + opcode.add_reg(o0, 5); goto EmitOp; } @@ -2198,20 +2292,26 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBranchRel: { - const InstDB::EncodingData::BaseBranchRel& opData = InstDB::EncodingData::baseBranchRel[encodingIndex]; + const InstDB::EncodingData::BaseBranchRel& op_data = InstDB::EncodingData::baseBranchRel[encoding_index]; if (isign4 == ENC_OPS1(Label) || isign4 == ENC_OPS1(Imm)) { - opcode.reset(opData.opcode); - rmRel = &o0; + opcode.reset(op_data.opcode); + rm_rel = &o0; + + // A variation that uses Cond code (or where Cond code is forced like BC.). + if (inst_cc != CondCode::kAL || Support::bit_test(opcode.v, 30)) { + if (opcode.has_x()) { + // Condition code cannot be applied when the instruction has X bit set (this would be BL instruction). 
+ goto InvalidInstruction; + } - if (instCC != CondCode::kAL) { opcode |= B(30); - opcode.addImm(condCodeToOpcodeCond(uint32_t(instCC)), 0); - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 19, 2); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(inst_cc)), 0); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 19, 2); goto EmitOp_Rel; } - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 0, 26, 2); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 0, 26, 2); goto EmitOp_Rel; } @@ -2219,22 +2319,22 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBranchCmp: { - const InstDB::EncodingData::BaseBranchCmp& opData = InstDB::EncodingData::baseBranchCmp[encodingIndex]; + const InstDB::EncodingData::BaseBranchCmp& op_data = InstDB::EncodingData::baseBranchCmp[encoding_index]; if (isign4 == ENC_OPS2(Reg, Label) || isign4 == ENC_OPS2(Reg, Imm)) { uint32_t x; - if (!checkGpType(o0, kWX, &x)) + if (!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode); - opcode.addImm(x, 31); - opcode.addReg(o0, 0); - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 19, 2); + opcode.reset(op_data.opcode); + opcode.add_imm(x, 31); + opcode.add_reg(o0, 0); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 19, 2); - rmRel = &o1; + rm_rel = &o1; goto EmitOp_Rel; } @@ -2242,31 +2342,31 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseBranchTst: { - const InstDB::EncodingData::BaseBranchTst& opData = InstDB::EncodingData::baseBranchTst[encodingIndex]; + const InstDB::EncodingData::BaseBranchTst& op_data = InstDB::EncodingData::baseBranchTst[encoding_index]; if (isign4 == ENC_OPS3(Reg, Imm, Label) || isign4 == ENC_OPS3(Reg, Imm, Imm)) { uint32_t x; - if (!checkGpType(o0, kWX, &x)) + 
if (!check_gp_type(o0, kWX, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - uint64_t imm = o1.as().valueAs(); + uint64_t imm = o1.as().value_as(); - opcode.reset(opData.opcode); + opcode.reset(op_data.opcode); if (imm >= 32) { if (!x) goto InvalidImmediate; - opcode.addImm(x, 31); + opcode.add_imm(x, 31); imm &= 0x1F; } - opcode.addReg(o0, 0); - opcode.addImm(imm, 19); - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 14, 2); + opcode.add_reg(o0, 0); + opcode.add_imm(imm, 19); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 14, 2); - rmRel = &o2; + rm_rel = &o2; goto EmitOp_Rel; } @@ -2278,74 +2378,74 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBasePrfm: { - const InstDB::EncodingData::BasePrfm& opData = InstDB::EncodingData::basePrfm[encodingIndex]; + const InstDB::EncodingData::BasePrfm& op_data = InstDB::EncodingData::basePrfm[encoding_index]; if (isign4 == ENC_OPS2(Imm, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; - uint32_t immShift = 3u; + uint32_t imm_shift = 3u; - if (o0.as().valueAs() > 0x1Fu) + if (o0.as().value_as() > 0x1Fu) goto InvalidImmediate; - if (!armCheckMemBaseIndexRel(m)) + if (!check_mem_base_index_rel(m)) goto InvalidAddress; int64_t offset = m.offset(); - uint32_t prfop = o0.as().valueAs(); + uint32_t prfop = o0.as().value_as(); - if (m.hasBaseReg()) { + if (m.has_base_reg()) { // [Base {Offset | Index}] - if (m.hasIndex()) { - uint32_t opt = armShiftOpToLdStOptMap[size_t(m.shiftOp())]; + if (m.has_index()) { + uint32_t opt = shift_op_to_ld_st_opt_map[size_t(m.shift_op())]; if (opt == 0xFF) goto InvalidAddress; uint32_t shift = m.shift(); uint32_t s = shift != 0; - if (s && shift != immShift) + if (s && shift != imm_shift) goto InvalidAddressScale; - opcode.reset(uint32_t(opData.registerOp) << 21); 
- opcode.addImm(opt, 13); - opcode.addImm(s, 12); + opcode.reset(uint32_t(op_data.register_op) << 21); + opcode.add_imm(opt, 13); + opcode.add_imm(s, 12); opcode |= B(11); - opcode.addImm(prfop, 0); + opcode.add_imm(prfop, 0); goto EmitOp_MemBaseIndex_Rn5_Rm16; } - if (!Support::isInt32(offset)) + if (!Support::is_int_n<32>(offset)) goto InvalidDisplacement; int32_t offset32 = int32_t(offset); - if (m.isPreOrPost()) + if (m.is_pre_or_post()) goto InvalidAddress; - uint32_t imm12 = uint32_t(offset32) >> immShift; + uint32_t imm12 = uint32_t(offset32) >> imm_shift; - if (Support::isUInt12(imm12) && (imm12 << immShift) == uint32_t(offset32)) { - opcode.reset(uint32_t(opData.sOffsetOp) << 22); - opcode.addImm(imm12, 10); - opcode.addImm(prfop, 0); + if (Support::is_uint_n<12>(imm12) && (imm12 << imm_shift) == uint32_t(offset32)) { + opcode.reset(uint32_t(op_data.s_offset_op) << 22); + opcode.add_imm(imm12, 10); + opcode.add_imm(prfop, 0); goto EmitOp_MemBase_Rn5; } - if (Support::isInt9(offset32)) { - opcode.reset(uint32_t(opData.uOffsetOp) << 21); - opcode.addImm(uint32_t(offset32) & 0x1FFu, 12); - opcode.addImm(prfop, 0); + if (Support::is_int_n<9>(offset32)) { + opcode.reset(uint32_t(op_data.u_offset_op) << 21); + opcode.add_imm(uint32_t(offset32) & 0x1FFu, 12); + opcode.add_imm(prfop, 0); goto EmitOp_MemBase_Rn5; } goto InvalidAddress; } else { - opcode.reset(uint32_t(opData.literalOp) << 24); - opcode.addImm(prfop, 0); - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 19, 2); + opcode.reset(uint32_t(op_data.literal_op) << 24); + opcode.add_imm(prfop, 0); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 19, 2); goto EmitOp_Rel; } } @@ -2358,96 +2458,96 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingBaseLdSt: { - const InstDB::EncodingData::BaseLdSt& opData = 
InstDB::EncodingData::baseLdSt[encodingIndex]; + const InstDB::EncodingData::BaseLdSt& op_data = InstDB::EncodingData::baseLdSt[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; // Instructions that work with either word or dword have the unsigned // offset shift set to 2 (word), so we set it to 3 (dword) if this is // X version of the instruction. - uint32_t xShiftMask = uint32_t(opData.uOffsetShift == 2); - uint32_t immShift = uint32_t(opData.uOffsetShift) + (x & xShiftMask); + uint32_t x_shift_mask = uint32_t(op_data.u_offset_shift == 2); + uint32_t imm_shift = uint32_t(op_data.u_offset_shift) + (x & x_shift_mask); - if (!armCheckMemBaseIndexRel(m)) + if (!check_mem_base_index_rel(m)) goto InvalidAddress; int64_t offset = m.offset(); - if (m.hasBaseReg()) { + if (m.has_base_reg()) { // [Base {Offset | Index}] - if (m.hasIndex()) { - uint32_t opt = armShiftOpToLdStOptMap[size_t(m.shiftOp())]; + if (m.has_index()) { + uint32_t opt = shift_op_to_ld_st_opt_map[size_t(m.shift_op())]; if (opt == 0xFF) goto InvalidAddress; uint32_t shift = m.shift(); uint32_t s = shift != 0; - if (s && shift != immShift) + if (s && shift != imm_shift) goto InvalidAddressScale; - opcode.reset(uint32_t(opData.registerOp) << 21); - opcode.xorImm(x, opData.xOffset); - opcode.addImm(opt, 13); - opcode.addImm(s, 12); + opcode.reset(uint32_t(op_data.register_op) << 21); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_imm(opt, 13); + opcode.add_imm(s, 12); opcode |= B(11); - opcode.addReg(o0, 0); + opcode.add_reg(o0, 0); goto EmitOp_MemBaseIndex_Rn5_Rm16; } // Makes it easier to work with the offset especially on 32-bit arch. 
- if (!Support::isInt32(offset)) + if (!Support::is_int_n<32>(offset)) goto InvalidDisplacement; int32_t offset32 = int32_t(offset); - if (m.isPreOrPost()) { - if (!Support::isInt9(offset32)) + if (m.is_pre_or_post()) { + if (!Support::is_int_n<9>(offset32)) goto InvalidDisplacement; - opcode.reset(uint32_t(opData.prePostOp) << 21); - opcode.xorImm(x, opData.xOffset); - opcode.addImm(offset32 & 0x1FF, 12); - opcode.addImm(m.isPreIndex(), 11); + opcode.reset(uint32_t(op_data.pre_post_op) << 21); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_imm(offset32 & 0x1FF, 12); + opcode.add_imm(m.is_pre_index(), 11); opcode |= B(10); - opcode.addReg(o0, 0); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } else { - uint32_t imm12 = uint32_t(offset32) >> immShift; + uint32_t imm12 = uint32_t(offset32) >> imm_shift; // Alternative form of LDUR/STUR and related instructions as described by AArch64 reference manual: // // If this instruction is not encodable with scaled unsigned offset, try unscaled signed offset. 
- if (!Support::isUInt12(imm12) || (imm12 << immShift) != uint32_t(offset32)) { - instId = opData.uAltInstId; - instInfo = &InstDB::_instInfoTable[instId]; - encodingIndex = instInfo->_encodingDataIndex; + if (!Support::is_uint_n<12>(imm12) || (imm12 << imm_shift) != uint32_t(offset32)) { + inst_id = op_data.u_alt_inst_id; + inst_info = &InstDB::_inst_info_table[inst_id]; + encoding_index = inst_info->_encoding_data_index; goto Case_BaseLdurStur; } - opcode.reset(uint32_t(opData.uOffsetOp) << 22); - opcode.xorImm(x, opData.xOffset); - opcode.addImm(imm12, 10); - opcode.addReg(o0, 0); + opcode.reset(uint32_t(op_data.u_offset_op) << 22); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_imm(imm12, 10); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } } else { - if (!opData.literalOp) + if (!op_data.literal_op) goto InvalidAddress; - opcode.reset(uint32_t(opData.literalOp) << 24); - opcode.xorImm(x, opData.xOffset); - opcode.addReg(o0, 0); - offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 19, 2); + opcode.reset(uint32_t(op_data.literal_op) << 24); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_reg(o0, 0); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 19, 2); goto EmitOp_Rel; } } @@ -2456,51 +2556,51 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseLdpStp: { - const InstDB::EncodingData::BaseLdpStp& opData = InstDB::EncodingData::baseLdpStp[encodingIndex]; + const InstDB::EncodingData::BaseLdpStp& op_data = InstDB::EncodingData::baseLdpStp[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { const Mem& m = o2.as(); - rmRel = &m; + rm_rel = &m; uint32_t x; - if (!checkGpType(o0, o1, opData.rType, &x)) + if (!check_gp_type(o0, o1, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - if (m.baseType() != RegType::kARM_GpX || m.hasIndex()) + if (m.base_type() != RegType::kGp64 || 
m.has_index()) goto InvalidAddress; - if (m.isOffset64Bit()) + if (m.is_offset_64bit()) goto InvalidDisplacement; - uint32_t offsetShift = opData.offsetShift + x; - int32_t offset32 = m.offsetLo32() >> offsetShift; + uint32_t offset_shift = op_data.offset_shift + x; + int32_t offset32 = m.offset_lo32() >> offset_shift; // Make sure we didn't lose bits by applying the mandatory offset shift. - if (uint32_t(offset32) << offsetShift != uint32_t(m.offsetLo32())) + if (uint32_t(offset32) << offset_shift != uint32_t(m.offset_lo32())) goto InvalidDisplacement; // Offset is encoded as 7-bit immediate. - if (!Support::isInt7(offset32)) + if (!Support::is_int_n<7>(offset32)) goto InvalidDisplacement; - if (m.isPreOrPost() && offset32 != 0) { - if (!opData.prePostOp) + if (m.is_pre_or_post() && offset32 != 0) { + if (!op_data.pre_post_op) goto InvalidAddress; - opcode.reset(uint32_t(opData.prePostOp) << 22); - opcode.addImm(m.isPreIndex(), 24); + opcode.reset(uint32_t(op_data.pre_post_op) << 22); + opcode.add_imm(m.is_pre_index(), 24); } else { - opcode.reset(uint32_t(opData.offsetOp) << 22); + opcode.reset(uint32_t(op_data.offset_op) << 22); } - opcode.addImm(x, opData.xOffset); - opcode.addImm(offset32 & 0x7F, 15); - opcode.addReg(o1, 10); - opcode.addReg(o0, 0); + opcode.add_imm(x, op_data.x_offset); + opcode.add_imm(offset32 & 0x7F, 15); + opcode.add_reg(o1, 10); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } @@ -2508,24 +2608,24 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseStx: { - const InstDB::EncodingData::BaseStx& opData = InstDB::EncodingData::baseStx[encodingIndex]; + const InstDB::EncodingData::BaseStx& op_data = InstDB::EncodingData::baseStx[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { const Mem& m = o2.as(); uint32_t x; - if (!o0.as().isGpW() || !checkGpType(o1, opData.rType, &x)) + if (!o0.as().is_gp32() || !check_gp_type(o1, op_data.reg_type, &x)) goto InvalidInstruction; - if 
(!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 16); - opcode.addReg(o1, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 16); + opcode.add_reg(o1, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2533,24 +2633,24 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseLdxp: { - const InstDB::EncodingData::BaseLdxp& opData = InstDB::EncodingData::baseLdxp[encodingIndex]; + const InstDB::EncodingData::BaseLdxp& op_data = InstDB::EncodingData::baseLdxp[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { const Mem& m = o2.as(); uint32_t x; - if (!checkGpType(o0, opData.rType, &x) || !checkSignature(o0, o1)) + if (!check_gp_type(o0, op_data.reg_type, &x) || !check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o1, 10); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o1, 10); + opcode.add_reg(o0, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2558,25 +2658,25 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseStxp: { - const InstDB::EncodingData::BaseStxp& opData = InstDB::EncodingData::baseStxp[encodingIndex]; + const InstDB::EncodingData::BaseStxp& op_data = InstDB::EncodingData::baseStxp[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem)) { const Mem& m = o3.as(); uint32_t x; - if (!o0.as().isGpW() || !checkGpType(o1, opData.rType, &x) || !checkSignature(o1, o2)) + if (!o0.as().is_gp32() || !check_gp_type(o1, op_data.reg_type, &x) || !check_signature(o1, o2)) goto InvalidInstruction; - if 
(!checkGpId(o0, o1, o2, kZR)) + if (!check_gp_id(o0, o1, o2, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 16); - opcode.addReg(o2, 10); - opcode.addReg(o1, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 16); + opcode.add_reg(o2, 10); + opcode.add_reg(o1, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2584,22 +2684,22 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRM_NoImm: { - const InstDB::EncodingData::BaseRM_NoImm& opData = InstDB::EncodingData::baseRM_NoImm[encodingIndex]; + const InstDB::EncodingData::BaseRM_NoImm& op_data = InstDB::EncodingData::baseRM_NoImm[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, opData.rHiId)) + if (!check_gp_id(o0, op_data.reg_hi_id)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 0); goto EmitOp_MemBaseNoImm_Rn5; } @@ -2608,44 +2708,44 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingBaseRM_SImm9: { Case_BaseLdurStur: - const InstDB::EncodingData::BaseRM_SImm9& opData = InstDB::EncodingData::baseRM_SImm9[encodingIndex]; + const InstDB::EncodingData::BaseRM_SImm9& op_data = InstDB::EncodingData::baseRM_SImm9[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, opData.rHiId)) + if (!check_gp_id(o0, 
op_data.reg_hi_id)) goto InvalidPhysId; - if (m.hasBaseReg() && !m.hasIndex()) { - if (m.isOffset64Bit()) + if (m.has_base_reg() && !m.has_index()) { + if (m.is_offset_64bit()) goto InvalidDisplacement; - int32_t offset32 = m.offsetLo32() >> opData.immShift; - if (Support::shl(offset32, opData.immShift) != m.offsetLo32()) + int32_t offset32 = m.offset_lo32() >> op_data.imm_shift; + if (Support::shl(offset32, op_data.imm_shift) != m.offset_lo32()) goto InvalidDisplacement; - if (!Support::isInt9(offset32)) + if (!Support::is_int_n<9>(offset32)) goto InvalidDisplacement; - if (m.isFixedOffset()) { - opcode.reset(opData.offsetOp()); + if (m.is_fixed_offset()) { + opcode.reset(op_data.offset_op()); } else { - if (!opData.prePostOp()) + if (!op_data.pre_post_op()) goto InvalidInstruction; - opcode.reset(opData.prePostOp()); - opcode.xorImm(m.isPreIndex(), 11); + opcode.reset(op_data.pre_post_op()); + opcode.xor_imm(m.is_pre_index(), 11); } - opcode.xorImm(x, opData.xOffset); - opcode.addImm(offset32 & 0x1FF, 12); - opcode.addReg(o0, 0); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_imm(offset32 & 0x1FF, 12); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } @@ -2656,42 +2756,42 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseRM_SImm10: { - const InstDB::EncodingData::BaseRM_SImm10& opData = InstDB::EncodingData::baseRM_SImm10[encodingIndex]; + const InstDB::EncodingData::BaseRM_SImm10& op_data = InstDB::EncodingData::baseRM_SImm10[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, opData.rHiId)) + if (!check_gp_id(o0, op_data.reg_hi_id)) goto InvalidPhysId; - if (m.hasBaseReg() && !m.hasIndex()) { - if (m.isOffset64Bit()) + if (m.has_base_reg() && !m.has_index()) { + if (m.is_offset_64bit()) goto 
InvalidDisplacement; - int32_t offset32 = m.offsetLo32() >> opData.immShift; - if (Support::shl(offset32, opData.immShift) != m.offsetLo32()) + int32_t offset32 = m.offset_lo32() >> op_data.imm_shift; + if (Support::shl(offset32, op_data.imm_shift) != m.offset_lo32()) goto InvalidDisplacement; - if (!Support::isInt10(offset32)) + if (!Support::is_int_n<10>(offset32)) goto InvalidDisplacement; - if (m.isPostIndex()) + if (m.is_post_index()) goto InvalidAddress; // Offset has 10 bits, sign is stored in the 10th bit. offset32 &= 0x3FF; - opcode.reset(opData.opcode()); - opcode.xorImm(m.isPreIndex(), 11); - opcode.xorImm(x, opData.xOffset); - opcode.addImm(offset32 >> 9, 22); - opcode.addImm(offset32, 12); - opcode.addReg(o0, 0); + opcode.reset(op_data.opcode()); + opcode.xor_imm(m.is_pre_index(), 11); + opcode.xor_imm(x, op_data.x_offset); + opcode.add_imm(offset32 >> 9, 22); + opcode.add_imm(offset32, 12); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } @@ -2702,24 +2802,24 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseAtomicOp: { - const InstDB::EncodingData::BaseAtomicOp& opData = InstDB::EncodingData::baseAtomicOp[encodingIndex]; + const InstDB::EncodingData::BaseAtomicOp& op_data = InstDB::EncodingData::baseAtomicOp[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { const Mem& m = o2.as(); uint32_t x; - if (!checkGpType(o0, opData.rType, &x) || !checkSignature(o0, o1)) + if (!check_gp_type(o0, op_data.reg_type, &x) || !check_signature(o0, o1)) goto InvalidInstruction; - if (!checkGpId(o0, o1, kZR)) + if (!check_gp_id(o0, o1, kZR)) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 16); - opcode.addReg(o1, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 16); + opcode.add_reg(o1, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2727,24 +2827,24 @@ Error 
Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseAtomicSt: { - const InstDB::EncodingData::BaseAtomicSt& opData = InstDB::EncodingData::baseAtomicSt[encodingIndex]; + const InstDB::EncodingData::BaseAtomicSt& op_data = InstDB::EncodingData::baseAtomicSt[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkGpId(o0, kZR)) + if (!check_gp_id(o0, kZR)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 16); - opcode.addReg(Gp::kIdZr, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 16); + opcode.add_reg(Gp::kIdZr, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2752,31 +2852,31 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingBaseAtomicCasp: { - const InstDB::EncodingData::BaseAtomicCasp& opData = InstDB::EncodingData::baseAtomicCasp[encodingIndex]; - const Operand_& o4 = opExt[EmitterUtils::kOp4]; + const InstDB::EncodingData::BaseAtomicCasp& op_data = InstDB::EncodingData::baseAtomicCasp[encoding_index]; + const Operand_& o4 = op_ext[EmitterUtils::kOp4]; - if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg) && o4.isMem()) { + if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg) && o4.is_mem()) { const Mem& m = o4.as(); uint32_t x; - if (!checkGpType(o0, opData.rType, &x)) + if (!check_gp_type(o0, op_data.reg_type, &x)) goto InvalidInstruction; - if (!checkSignature(o0, o1, o2, o3)) + if (!check_signature(o0, o1, o2, o3)) goto InvalidInstruction; - if (!checkEven(o0, o2) || !checkGpId(o0, o2, kZR)) + if (!check_even(o0, o2) || !check_gp_id(o0, o2, kZR)) goto InvalidPhysId; - if (!checkConsecutive(o0, o1) || !checkConsecutive(o2, o3)) + if (!check_consecutive(o0, o1) || 
!check_consecutive(o2, o3)) goto InvalidPhysId; - opcode.reset(opData.opcode()); - opcode.addImm(x, opData.xOffset); - opcode.addReg(o0, 16); - opcode.addReg(o2, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(x, op_data.x_offset); + opcode.add_reg(o0, 16); + opcode.add_reg(o2, 0); - rmRel = &m; + rm_rel = &m; goto EmitOp_MemBaseNoImm_Rn5; } @@ -2788,24 +2888,24 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingFSimdSV: { - const InstDB::EncodingData::FSimdSV& opData = InstDB::EncodingData::fSimdSV[encodingIndex]; + const InstDB::EncodingData::FSimdSV& op_data = InstDB::EncodingData::fSimdSV[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { - uint32_t q = diff(o1.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o1.as().reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - if (o0.as().hasElementType()) + if (o0.as().has_element_type()) goto InvalidInstruction; // This operation is only defined for: // hD, vS.{4|8}h (16-bit) // sD, vS.4s (32-bit) - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); - uint32_t elementSz = diff(o1.as().elementType(), VecElementType::kH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); + uint32_t element_sz = diff(o1.as().element_type(), VecElementType::kH); // Size greater than 1 means 64-bit elements, not supported. - if ((sz | elementSz) > 1 || sz != elementSz) + if ((sz | element_sz) > 1 || sz != element_sz) goto InvalidInstruction; // Size 1 (32-bit float) requires at least 4 elements. @@ -2813,11 +2913,11 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co goto InvalidInstruction; // Bit flipping according to sz. 
- static const uint32_t szBits[] = { B(29), 0 }; + static const uint32_t sz_bits_table[] = { B(29), 0 }; - opcode.reset(opData.opcode << 10); - opcode ^= szBits[sz]; - opcode.addImm(q, 30); + opcode.reset(op_data.opcode << 10); + opcode ^= sz_bits_table[sz]; + opcode.add_imm(q, 30); goto EmitOp_Rd0_Rn5; } @@ -2825,13 +2925,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingFSimdVV: { - const InstDB::EncodingData::FSimdVV& opData = InstDB::EncodingData::fSimdVV[encodingIndex]; + const InstDB::EncodingData::FSimdVV& op_data = InstDB::EncodingData::fSimdVV[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { - if (!matchSignature(o0, o1, instFlags)) + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.scalarOp(), opData.scalarHf(), opData.vectorOp(), opData.vectorHf(), &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_op(), op_data.scalar_hf(), op_data.vector_op(), op_data.vector_hf(), &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5; @@ -2841,13 +2941,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingFSimdVVV: { - const InstDB::EncodingData::FSimdVVV& opData = InstDB::EncodingData::fSimdVVV[encodingIndex]; + const InstDB::EncodingData::FSimdVVV& op_data = InstDB::EncodingData::fSimdVVV[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.scalarOp(), opData.scalarHf(), opData.vectorOp(), opData.vectorHf(), &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_op(), op_data.scalar_hf(), op_data.vector_op(), op_data.vector_hf(), &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5_Rm16; @@ -2857,39 +2957,39 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingFSimdVVVe: { - 
const InstDB::EncodingData::FSimdVVVe& opData = InstDB::EncodingData::fSimdVVVe[encodingIndex]; + const InstDB::EncodingData::FSimdVVVe& op_data = InstDB::EncodingData::fSimdVVVe[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!o2.as().hasElementIndex()) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (!o2.as().has_element_index()) { + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.scalarOp(), opData.scalarHf(), opData.vectorOp(), opData.vectorHf(), &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_op(), op_data.scalar_hf(), op_data.vector_op(), op_data.vector_hf(), &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5_Rm16; } else { - if (!matchSignature(o0, o1, instFlags)) + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - uint32_t q = o1.as().isVecQ(); + uint32_t q = o1.as().is_vec128(); uint32_t sz; - if (!pickFpOpcode(o0.as(), opData.elementScalarOp(), InstDB::kHF_D, opData.elementVectorOp(), InstDB::kHF_D, &opcode, &sz)) + if (!pick_fp_opcode(o0.as(), op_data.element_scalar_op(), InstDB::kHF_D, op_data.element_vector_op(), InstDB::kHF_D, &opcode, &sz)) goto InvalidInstruction; if (sz == 0 && o2.as().id() > 15) goto InvalidPhysId; - uint32_t elementIndex = o2.as().elementIndex(); - if (elementIndex > (7u >> sz)) + uint32_t element_index = o2.as().element_index(); + if (element_index > (7u >> sz)) goto InvalidElementIndex; - uint32_t hlm = elementIndex << sz; - opcode.addImm(q, 30); - opcode.addImm(hlm & 3u, 20); - opcode.addImm(hlm >> 2, 11); + uint32_t hlm = element_index << sz; + opcode.add_imm(q, 30); + opcode.add_imm(hlm & 3u, 20); + opcode.add_imm(hlm >> 2, 11); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -2898,13 +2998,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingFSimdVVVV: { - const InstDB::EncodingData::FSimdVVVV& opData = InstDB::EncodingData::fSimdVVVV[encodingIndex]; + const 
InstDB::EncodingData::FSimdVVVV& op_data = InstDB::EncodingData::fSimdVVVV[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) { - if (!matchSignature(o0, o1, o2, o3, instFlags)) + if (!match_signature(o0, o1, o2, o3, inst_flags)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.scalarOp(), opData.scalarHf(), opData.vectorOp(), opData.vectorHf(), &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_op(), op_data.scalar_hf(), op_data.vector_op(), op_data.vector_hf(), &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5_Rm16_Ra10; @@ -2914,17 +3014,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcadd: { - const InstDB::EncodingData::SimdFcadd& opData = InstDB::EncodingData::simdFcadd[encodingIndex]; + const InstDB::EncodingData::SimdFcadd& op_data = InstDB::EncodingData::simdFcadd[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - if (!checkSignature(o0, o1, o2) || o0.as().hasElementIndex()) + if (!check_signature(o0, o1, o2) || o0.as().has_element_index()) goto InvalidInstruction; - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - uint32_t sz = diff(o0.as().elementType(), VecElementType::kB); + uint32_t sz = diff(o0.as().element_type(), VecElementType::kB); if (sz == 0 || sz > 3) goto InvalidInstruction; @@ -2936,10 +3036,10 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co else if (o3.as().value() != 90) goto InvalidImmediate; - opcode.reset(opData.opcode()); - opcode.addImm(q, 30); - opcode.addImm(sz, 22); - opcode.addImm(rot, 12); + opcode.reset(op_data.opcode()); + opcode.add_imm(q, 30); + opcode.add_imm(sz, 22); + opcode.add_imm(rot, 12); goto EmitOp_Rd0_Rn5_Rm16; } @@ -2947,28 +3047,28 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFccmpFccmpe: { - const 
InstDB::EncodingData::SimdFccmpFccmpe& opData = InstDB::EncodingData::simdFccmpFccmpe[encodingIndex]; + const InstDB::EncodingData::SimdFccmpFccmpe& op_data = InstDB::EncodingData::simdFccmpFccmpe[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) { - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); if (sz > 2) goto InvalidInstruction; - if (!checkSignature(o0, o1) || o0.as().hasElementType()) + if (!check_signature(o0, o1) || o0.as().has_element_type()) goto InvalidInstruction; - uint64_t nzcv = o2.as().valueAs(); - uint64_t cond = o3.as().valueAs(); + uint64_t nzcv = o2.as().value_as(); + uint64_t cond = o3.as().value_as(); if ((nzcv | cond) > 0xFu) goto InvalidImmediate; uint32_t type = (sz - 1) & 0x3u; - opcode.reset(opData.opcode()); - opcode.addImm(type, 22); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)), 12); - opcode.addImm(nzcv, 0); + opcode.reset(op_data.opcode()); + opcode.add_imm(type, 22); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)), 12); + opcode.add_imm(nzcv, 0); goto EmitOp_Rn5_Rm16; } @@ -2977,26 +3077,26 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcm: { - const InstDB::EncodingData::SimdFcm& opData = InstDB::EncodingData::simdFcm[encodingIndex]; + const InstDB::EncodingData::SimdFcm& op_data = InstDB::EncodingData::simdFcm[encoding_index]; - if (isign4 == ENC_OPS3(Reg, Reg, Reg) && opData.hasRegisterOp()) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (isign4 == ENC_OPS3(Reg, Reg, Reg) && op_data.has_register_op()) { + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.registerScalarOp(), opData.registerScalarHf(), opData.registerVectorOp(), opData.registerVectorHf(), &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.register_scalar_op(), op_data.register_scalar_hf(), op_data.register_vector_op(), 
op_data.register_vector_hf(), &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5_Rm16; } - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.hasZeroOp()) { - if (!checkSignature(o0, o1)) + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.has_zero_op()) { + if (!check_signature(o0, o1)) goto InvalidInstruction; if (o2.as().value() != 0 || o2.as().predicate() != 0) goto InvalidImmediate; - if (!pickFpOpcode(o0.as(), opData.zeroScalarOp(), InstDB::kHF_B, opData.zeroVectorOp(), InstDB::kHF_B, &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.zero_scalar_op(), InstDB::kHF_B, op_data.zero_vector_op(), InstDB::kHF_B, &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5; @@ -3006,17 +3106,17 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcmla: { - const InstDB::EncodingData::SimdFcmla& opData = InstDB::EncodingData::simdFcmla[encodingIndex]; + const InstDB::EncodingData::SimdFcmla& op_data = InstDB::EncodingData::simdFcmla[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - uint32_t sz = diff(o0.as().elementType(), VecElementType::kB); + uint32_t sz = diff(o0.as().element_type(), VecElementType::kB); if (sz == 0 || sz > 3) goto InvalidInstruction; @@ -3030,18 +3130,18 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co goto InvalidImmediate; } - if (!o2.as().hasElementIndex()) { - if (!checkSignature(o1, o2)) + if (!o2.as().has_element_index()) { + if (!check_signature(o1, o2)) goto InvalidInstruction; - opcode.reset(opData.regularOp()); - opcode.addImm(q, 30); - opcode.addImm(sz, 22); - opcode.addImm(rot, 11); + opcode.reset(op_data.regular_op()); + opcode.add_imm(q, 30); + opcode.add_imm(sz, 22); + opcode.add_imm(rot, 11); 
goto EmitOp_Rd0_Rn5_Rm16; } else { - if (o0.as().elementType() != o2.as().elementType()) + if (o0.as().element_type() != o2.as().element_type()) goto InvalidInstruction; // Only allowed vectors are: 4H, 8H, and 4S. @@ -3052,21 +3152,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // 4H - ElementIndex[0..1] (index 2..3 is UNDEFINED). // 8H - ElementIndex[0..3]. // 4S - ElementIndex[0..1]. - uint32_t elementIndex = o2.as().elementIndex(); - uint32_t hlFieldShift = sz == 1 ? 0u : 1u; - uint32_t maxElementIndex = q == 1 && sz == 1 ? 3u : 1u; + uint32_t element_index = o2.as().element_index(); + uint32_t hl_field_shift = sz == 1 ? 0u : 1u; + uint32_t max_element_index = q == 1 && sz == 1 ? 3u : 1u; - if (elementIndex > maxElementIndex) + if (element_index > max_element_index) goto InvalidElementIndex; - uint32_t hl = elementIndex << hlFieldShift; + uint32_t hl = element_index << hl_field_shift; - opcode.reset(opData.elementOp()); - opcode.addImm(q, 30); - opcode.addImm(sz, 22); - opcode.addImm(hl & 1u, 21); // L field. - opcode.addImm(hl >> 1, 11); // H field. - opcode.addImm(rot, 13); + opcode.reset(op_data.element_op()); + opcode.add_imm(q, 30); + opcode.add_imm(sz, 22); + opcode.add_imm(hl & 1u, 21); // L field. + opcode.add_imm(hl >> 1, 11); // H field. 
+ opcode.add_imm(rot, 13); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -3075,22 +3175,22 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcmpFcmpe: { - const InstDB::EncodingData::SimdFcmpFcmpe& opData = InstDB::EncodingData::simdFcmpFcmpe[encodingIndex]; + const InstDB::EncodingData::SimdFcmpFcmpe& op_data = InstDB::EncodingData::simdFcmpFcmpe[encoding_index]; - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); uint32_t type = (sz - 1) & 0x3u; if (sz > 2) goto InvalidInstruction; - if (o0.as().hasElementType()) + if (o0.as().has_element_type()) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(type, 22); + opcode.reset(op_data.opcode()); + opcode.add_imm(type, 22); if (isign4 == ENC_OPS2(Reg, Reg)) { - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; goto EmitOp_Rn5_Rm16; @@ -3109,22 +3209,22 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingSimdFcsel: { if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - if (!checkSignature(o0, o1, o2)) + if (!check_signature(o0, o1, o2)) goto InvalidInstruction; - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); uint32_t type = (sz - 1) & 0x3u; - if (sz > 2 || o0.as().hasElementType()) + if (sz > 2 || o0.as().has_element_type()) goto InvalidInstruction; - uint64_t cond = o3.as().valueAs(); + uint64_t cond = o3.as().value_as(); if (cond > 0xFu) goto InvalidImmediate; opcode.reset(0b00011110001000000000110000000000); - opcode.addImm(type, 22); - opcode.addImm(condCodeToOpcodeCond(uint32_t(cond)), 12); + opcode.add_imm(type, 22); + opcode.add_imm(cond_code_to_opcode_field(uint32_t(cond)), 12); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3133,13 +3233,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case 
InstDB::kEncodingSimdFcvt: { if (isign4 == ENC_OPS2(Reg, Reg)) { - uint32_t dstSz = diff(o0.as().type(), RegType::kARM_VecH); - uint32_t srcSz = diff(o1.as().type(), RegType::kARM_VecH); + uint32_t dst_sz = diff(o0.as().reg_type(), RegType::kVec16); + uint32_t src_sz = diff(o1.as().reg_type(), RegType::kVec16); - if ((dstSz | srcSz) > 3) + if ((dst_sz | src_sz) > 3) goto InvalidInstruction; - if (o0.as().hasElementType() || o1.as().hasElementType()) + if (o0.as().has_element_type() || o1.as().has_element_type()) goto InvalidInstruction; // Table that provides 'type' and 'opc' according to the dst/src combination. @@ -3162,10 +3262,10 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co 0xFFu // Q <- Q (Invalid). }; - uint32_t typeOpc = table[(dstSz << 2) | srcSz]; + uint32_t type_opc = table[(dst_sz << 2) | src_sz]; opcode.reset(0b0001111000100010010000 << 10); - opcode.addImm(typeOpc >> 4, 22); - opcode.addImm(typeOpc & 15, 15); + opcode.add_imm(type_opc >> 4, 22); + opcode.add_imm(type_opc & 15, 15); goto EmitOp_Rd0_Rn5; } @@ -3173,36 +3273,36 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcvtLN: { - const InstDB::EncodingData::SimdFcvtLN& opData = InstDB::EncodingData::simdFcvtLN[encodingIndex]; + const InstDB::EncodingData::SimdFcvtLN& op_data = InstDB::EncodingData::simdFcvtLN[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { // Scalar form - only FCVTXN. - if (o0.as().isVecS() && o1.as().isVecD()) { - if (!opData.hasScalar()) + if (o0.as().is_vec32() && o1.as().is_vec64()) { + if (!op_data.has_scalar()) goto InvalidInstruction; - if (o0.as().hasElementType() || o1.as().hasElementType()) + if (o0.as().has_element_type() || o1.as().has_element_type()) goto InvalidInstruction; - opcode.reset(opData.scalarOp()); + opcode.reset(op_data.scalar_op()); opcode |= B(22); // sz bit must be 1, the only supported combination of FCVTXN. 
goto EmitOp_Rd0_Rn5; } - opcode.reset(opData.vectorOp()); + opcode.reset(op_data.vector_op()); - const Vec& rL = (instFlags & InstDB::kInstFlagLong) ? o0.as() : o1.as(); - const Vec& rN = (instFlags & InstDB::kInstFlagLong) ? o1.as() : o0.as(); + const Vec& rl = (inst_flags & InstDB::kInstFlagLong) ? o0.as() : o1.as(); + const Vec& rn = (inst_flags & InstDB::kInstFlagLong) ? o1.as() : o0.as(); - uint32_t q = diff(rN.type(), RegType::kARM_VecD); - if (uint32_t(opcode.hasQ()) != q) + uint32_t q = diff(rn.reg_type(), RegType::kVec64); + if (uint32_t(opcode.has_q()) != q) goto InvalidInstruction; - if (rL.isVecS4() && rN.elementType() == VecElementType::kH && !opData.isCvtxn()) { + if (rl.is_vec_s4() && rn.element_type() == VecElementType::kH && !op_data.is_cvtxn()) { goto EmitOp_Rd0_Rn5; } - if (rL.isVecD2() && rN.elementType() == VecElementType::kS) { + if (rl.is_vec_d2() && rn.element_type() == VecElementType::kS) { opcode |= B(22); goto EmitOp_Rd0_Rn5; } @@ -3212,76 +3312,76 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFcvtSV: { - const InstDB::EncodingData::SimdFcvtSV& opData = InstDB::EncodingData::simdFcvtSV[encodingIndex]; + const InstDB::EncodingData::SimdFcvtSV& op_data = InstDB::EncodingData::simdFcvtSV[encoding_index]; // So we can support both IntToFloat and FloatToInt conversions. - const Operand_& oGp = opData.isFloatToInt() ? o0 : o1; - const Operand_& oVec = opData.isFloatToInt() ? o1 : o0; + const Operand_& op_gp = op_data.is_float_to_int() ? o0 : o1; + const Operand_& op_vec = op_data.is_float_to_int() ? 
o1 : o0; if (isign4 == ENC_OPS2(Reg, Reg)) { - if (oGp.as().isGp() && oVec.as().isVec()) { - uint32_t x = oGp.as().isGpX(); - uint32_t type = diff(oVec.as().type(), RegType::kARM_VecH); + if (op_gp.as().is_gp() && op_vec.as().is_vec()) { + uint32_t x = op_gp.as().is_gp64(); + uint32_t type = diff(op_vec.as().reg_type(), RegType::kVec16); if (type > 2u) goto InvalidInstruction; type = (type - 1u) & 0x3; - opcode.reset(opData.generalOp()); - opcode.addImm(type, 22); - opcode.addImm(x, 31); + opcode.reset(op_data.general_op()); + opcode.add_imm(type, 22); + opcode.add_imm(x, 31); goto EmitOp_Rd0_Rn5; } - if (o0.as().isVec() && o1.as().isVec()) { - if (!checkSignature(o0, o1)) + if (o0.as().is_vec() && o1.as().is_vec()) { + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!pickFpOpcode(o0.as(), opData.scalarIntOp(), InstDB::kHF_B, opData.vectorIntOp(), InstDB::kHF_B, &opcode)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_int_op(), InstDB::kHF_B, op_data.vector_int_op(), InstDB::kHF_B, &opcode)) goto InvalidInstruction; goto EmitOp_Rd0_Rn5; } } - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.isFixedPoint()) { - if (o2.as().valueAs() >= 64) + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.is_fixed_point()) { + if (o2.as().value_as() >= 64) goto InvalidInstruction; - uint32_t scale = o2.as().valueAs(); + uint32_t scale = o2.as().value_as(); if (scale == 0) goto InvalidInstruction; - if (oGp.as().isGp() && oVec.as().isVec()) { - uint32_t x = oGp.as().isGpX(); - uint32_t type = diff(oVec.as().type(), RegType::kARM_VecH); + if (op_gp.as().is_gp() && op_vec.as().is_vec()) { + uint32_t x = op_gp.as().is_gp64(); + uint32_t type = diff(op_vec.as().reg_type(), RegType::kVec16); - uint32_t scaleLimit = 32u << x; - if (scale > scaleLimit) + uint32_t scale_limit = 32u << x; + if (scale > scale_limit) goto InvalidInstruction; type = (type - 1u) & 0x3; - opcode.reset(opData.generalOp() ^ B(21)); - opcode.addImm(type, 22); - opcode.addImm(x, 31); - opcode.addImm(64u - 
scale, 10); + opcode.reset(op_data.general_op() ^ B(21)); + opcode.add_imm(type, 22); + opcode.add_imm(x, 31); + opcode.add_imm(64u - scale, 10); goto EmitOp_Rd0_Rn5; } - if (o0.as().isVec() && o1.as().isVec()) { - if (!checkSignature(o0, o1)) + if (o0.as().is_vec() && o1.as().is_vec()) { + if (!check_signature(o0, o1)) goto InvalidInstruction; uint32_t sz; - if (!pickFpOpcode(o0.as(), opData.scalarFpOp(), InstDB::kHF_0, opData.vectorFpOp(), InstDB::kHF_0, &opcode, &sz)) + if (!pick_fp_opcode(o0.as(), op_data.scalar_fp_op(), InstDB::kHF_0, op_data.vector_fp_op(), InstDB::kHF_0, &opcode, &sz)) goto InvalidInstruction; - uint32_t scaleLimit = 16u << sz; - if (scale > scaleLimit) + uint32_t scale_limit = 16u << sz; + if (scale > scale_limit) goto InvalidInstruction; - uint32_t imm = Support::neg(scale) & Support::lsbMask(sz + 4 + 1); - opcode.addImm(imm, 16); + uint32_t imm = Support::neg(scale) & Support::lsb_mask(sz + 4 + 1); + opcode.add_imm(imm, 16); goto EmitOp_Rd0_Rn5; } } @@ -3290,13 +3390,13 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdFmlal: { - const InstDB::EncodingData::SimdFmlal& opData = InstDB::EncodingData::simdFmlal[encodingIndex]; + const InstDB::EncodingData::SimdFmlal& op_data = InstDB::EncodingData::simdFmlal[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); - uint32_t qIsOptional = opData.optionalQ(); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); + uint32_t q_is_optional = op_data.optional_q(); - if (qIsOptional) { + if (q_is_optional) { // This instruction works with either 64-bit or 128-bit registers, // encoded by Q bit. 
if (q > 1) @@ -3312,34 +3412,34 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co q = 0; } - if (uint32_t(o0.as().type()) != uint32_t(o1.as().type()) + qIsOptional || - uint32_t(o0.as().elementType()) != opData.tA || - uint32_t(o1.as().elementType()) != opData.tB) + if (uint32_t(o0.as().reg_type()) != uint32_t(o1.as().reg_type()) + q_is_optional || + uint32_t(o0.as().element_type()) != op_data.ta || + uint32_t(o1.as().element_type()) != op_data.tb) goto InvalidInstruction; - if (!o2.as().hasElementIndex()) { - if (!checkSignature(o1, o2)) + if (!o2.as().has_element_index()) { + if (!check_signature(o1, o2)) goto InvalidInstruction; - opcode.reset(opData.vectorOp()); - opcode.addImm(q, 30); + opcode.reset(op_data.vector_op()); + opcode.add_imm(q, 30); goto EmitOp_Rd0_Rn5_Rm16; } else { - if (uint32_t(o2.as().elementType()) != opData.tElement) + if (uint32_t(o2.as().element_type()) != op_data.tElement) goto InvalidInstruction; if (o2.as().id() > 15) goto InvalidPhysId; - uint32_t elementIndex = o2.as().elementIndex(); - if (elementIndex > 7u) + uint32_t element_index = o2.as().element_index(); + if (element_index > 7u) goto InvalidElementIndex; - opcode.reset(opData.elementOp()); - opcode.addImm(q, 30); - opcode.addImm(elementIndex & 3u, 20); - opcode.addImm(elementIndex >> 2, 11); + opcode.reset(op_data.element_op()); + opcode.add_imm(q, 30); + opcode.add_imm(element_index & 3u, 20); + opcode.add_imm(element_index >> 2, 11); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -3352,141 +3452,141 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // FMOV Gp <-> Vec opcode: opcode.reset(0b00011110001001100000000000000000); - if (o0.as().isGp() && o1.as().isVec()) { + if (o0.as().is_gp() && o1.as().is_vec()) { // FMOV Wd, Hn (sf=0 type=11 rmode=00 op=110) // FMOV Xd, Hn (sf=1 type=11 rmode=00 op=110) // FMOV Wd, Sn (sf=0 type=00 rmode=00 op=110) // FMOV Xd, Dn (sf=1 type=11 rmode=00 op=110) // FMOV Xd, Vn.d[1] (sf=1 
type=10 rmode=01 op=110) - uint32_t x = o0.as().isGpX(); - uint32_t sz = diff(o1.as().type(), RegType::kARM_VecH); + uint32_t x = o0.as().is_gp64(); + uint32_t sz = diff(o1.as().reg_type(), RegType::kVec16); uint32_t type = (sz - 1) & 0x3u; - uint32_t rModeOp = 0b00110; + uint32_t r_mode_op = 0b00110; - if (o1.as().hasElementIndex()) { + if (o1.as().has_element_index()) { // Special case. - if (!x || !o1.as().isVecD2() || o1.as().elementIndex() != 1) + if (!x || !o1.as().is_vec_d2() || o1.as().element_index() != 1) goto InvalidInstruction; type = 0b10; - rModeOp = 0b01110; + r_mode_op = 0b01110; } else { // Must be scalar. if (sz > 2) goto InvalidInstruction; - if (o1.as().hasElementType()) + if (o1.as().has_element_type()) goto InvalidInstruction; - if (o1.as().isVecS() && x) + if (o1.as().is_vec32() && x) goto InvalidInstruction; - if (o1.as().isVecD() && !x) + if (o1.as().is_vec64() && !x) goto InvalidInstruction; } - opcode.addImm(x, 31); - opcode.addImm(type, 22); - opcode.addImm(rModeOp, 16); + opcode.add_imm(x, 31); + opcode.add_imm(type, 22); + opcode.add_imm(r_mode_op, 16); goto EmitOp_Rd0_Rn5; } - if (o0.as().isVec() && o1.as().isGp()) { + if (o0.as().is_vec() && o1.as().is_gp()) { // FMOV Hd, Wn (sf=0 type=11 rmode=00 op=111) // FMOV Hd, Xn (sf=1 type=11 rmode=00 op=111) // FMOV Sd, Wn (sf=0 type=00 rmode=00 op=111) // FMOV Dd, Xn (sf=1 type=11 rmode=00 op=111) // FMOV Vd.d[1], Xn (sf=1 type=10 rmode=01 op=111) - uint32_t x = o1.as().isGpX(); - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t x = o1.as().is_gp64(); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); uint32_t type = (sz - 1) & 0x3u; - uint32_t rModeOp = 0b00111; + uint32_t r_mode_op = 0b00111; - if (o0.as().hasElementIndex()) { + if (o0.as().has_element_index()) { // Special case. 
- if (!x || !o0.as().isVecD2() || o0.as().elementIndex() != 1) + if (!x || !o0.as().is_vec_d2() || o0.as().element_index() != 1) goto InvalidInstruction; type = 0b10; - rModeOp = 0b01111; + r_mode_op = 0b01111; } else { // Must be scalar. if (sz > 2) goto InvalidInstruction; - if (o0.as().hasElementType()) + if (o0.as().has_element_type()) goto InvalidInstruction; - if (o0.as().isVecS() && x) + if (o0.as().is_vec32() && x) goto InvalidInstruction; - if (o0.as().isVecD() && !x) + if (o0.as().is_vec64() && !x) goto InvalidInstruction; } - opcode.addImm(x, 31); - opcode.addImm(type, 22); - opcode.addImm(rModeOp, 16); + opcode.add_imm(x, 31); + opcode.add_imm(type, 22); + opcode.add_imm(r_mode_op, 16); goto EmitOp_Rd0_Rn5; } - if (checkSignature(o0, o1)) { - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + if (check_signature(o0, o1)) { + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); if (sz > 2) goto InvalidInstruction; - if (o0.as().hasElementType()) + if (o0.as().has_element_type()) goto InvalidInstruction; uint32_t type = (sz - 1) & 0x3; opcode.reset(0b00011110001000000100000000000000); - opcode.addImm(type, 22); + opcode.add_imm(type, 22); goto EmitOp_Rd0_Rn5; } } if (isign4 == ENC_OPS2(Reg, Imm)) { - if (o0.as().isVec()) { - double fpValue; - if (o1.as().isDouble()) - fpValue = o1.as().valueAs(); - else if (o1.as().isInt32()) - fpValue = o1.as().valueAs(); + if (o0.as().is_vec()) { + double fp_value; + if (o1.as().is_double()) + fp_value = o1.as().value_as(); + else if (o1.as().is_int32()) + fp_value = o1.as().value_as(); else goto InvalidImmediate; - if (!Utils::isFP64Imm8(fpValue)) + if (!Utils::is_fp64_imm8(fp_value)) goto InvalidImmediate; - uint32_t imm8 = Utils::encodeFP64ToImm8(fpValue); - if (!o0.as().hasElementType()) { + uint32_t imm8 = Utils::encode_fp64_to_imm8(fp_value); + if (!o0.as().has_element_type()) { // FMOV (scalar, immediate). 
- uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); uint32_t type = (sz - 1u) & 0x3u; if (sz > 2) goto InvalidInstruction; opcode.reset(0b00011110001000000001000000000000); - opcode.addImm(type, 22); - opcode.addImm(imm8, 13); + opcode.add_imm(type, 22); + opcode.add_imm(imm8, 13); goto EmitOp_Rd0; } else { - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); - uint32_t sz = diff(o0.as().elementType(), VecElementType::kH); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); + uint32_t sz = diff(o0.as().element_type(), VecElementType::kH); if (q > 1 || sz > 2) goto InvalidInstruction; - static const uint32_t szBits[3] = { B(11), B(0), B(29) }; + static const uint32_t sz_bits_table[3] = { B(11), B(0), B(29) }; opcode.reset(0b00001111000000001111010000000000); - opcode ^= szBits[sz]; - opcode.addImm(q, 30); - opcode.addImm(imm8 >> 5, 16); - opcode.addImm(imm8 & 31, 5); + opcode ^= sz_bits_table[sz]; + opcode.add_imm(q, 30); + opcode.add_imm(imm8 >> 5, 16); + opcode.add_imm(imm8 & 31, 5); goto EmitOp_Rd0; } } @@ -3496,48 +3596,48 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingFSimdPair: { - const InstDB::EncodingData::FSimdPair& opData = InstDB::EncodingData::fSimdPair[encodingIndex]; + const InstDB::EncodingData::FSimdPair& op_data = InstDB::EncodingData::fSimdPair[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { // This operation is only defined for: // hD, vS.2h (16-bit) // sD, vS.2s (32-bit) // dD, vS.2d (64-bit) - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecH); + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec16); if (sz > 2) goto InvalidInstruction; static const uint32_t szSignatures[3] = { - VecS::kSignature | (Vec::kSignatureElementH), - VecD::kSignature | (Vec::kSignatureElementS), - VecV::kSignature | (Vec::kSignatureElementD) + RegTraits::kSignature | (Vec::kSignatureElementH), + RegTraits::kSignature | 
(Vec::kSignatureElementS), + RegTraits::kSignature | (Vec::kSignatureElementD) }; if (o1.signature() != szSignatures[sz]) goto InvalidInstruction; - static const uint32_t szBits[] = { B(29), 0, B(22) }; - opcode.reset(opData.scalarOp()); - opcode ^= szBits[sz]; + static const uint32_t sz_bits_table[] = { B(29), 0, B(22) }; + opcode.reset(op_data.scalar_op()); + opcode ^= sz_bits_table[sz]; goto EmitOp_Rd0_Rn5; } if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!checkSignature(o0, o1, o2)) + if (!check_signature(o0, o1, o2)) goto InvalidInstruction; - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - uint32_t sz = diff(o0.as().elementType(), VecElementType::kH); + uint32_t sz = diff(o0.as().element_type(), VecElementType::kH); if (sz > 2) goto InvalidInstruction; - static const uint32_t szBits[3] = { B(22) | B(21) | B(15) | B(14), 0, B(22) }; - opcode.reset(opData.vectorOp()); - opcode ^= szBits[sz]; - opcode.addImm(q, 30); + static const uint32_t sz_bits_table[3] = { B(22) | B(21) | B(15) | B(14), 0, B(22) }; + opcode.reset(op_data.vector_op()); + opcode ^= sz_bits_table[sz]; + opcode.add_imm(q, 30); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3549,21 +3649,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case InstDB::kEncodingISimdSV: { - const InstDB::EncodingData::ISimdSV& opData = InstDB::EncodingData::iSimdSV[encodingIndex]; + const InstDB::EncodingData::ISimdSV& op_data = InstDB::EncodingData::iSimdSV[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { // The first destination operand is scalar, which matches element-type of source vectors. 
- uint32_t L = (instFlags & InstDB::kInstFlagLong) != 0; - if (diff(o0.as().type(), RegType::kARM_VecB) != diff(o1.as().elementType(), VecElementType::kB) + L) + uint32_t L = (inst_flags & InstDB::kInstFlagLong) != 0; + if (diff(o0.as().reg_type(), RegType::kVec8) != diff(o1.as().element_type(), VecElementType::kB) + L) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o1.as().type(), o1.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o1.as().reg_type(), o1.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(op_data.opcode()); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5; } @@ -3571,21 +3671,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVV: { - const InstDB::EncodingData::ISimdVV& opData = InstDB::EncodingData::iSimdVV[encodingIndex]; + const InstDB::EncodingData::ISimdVV& op_data = InstDB::EncodingData::iSimdVV[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - if (!matchSignature(o0, o1, instFlags)) + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(op_data.opcode()); + opcode.add_imm(size_op.qs(), 30); + 
opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5; } @@ -3593,14 +3693,14 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVx: { - const InstDB::EncodingData::ISimdVVx& opData = InstDB::EncodingData::iSimdVVx[encodingIndex]; + const InstDB::EncodingData::ISimdVVx& op_data = InstDB::EncodingData::iSimdVVx[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { - if (o0.signature() != opData.op0Signature || - o1.signature() != opData.op1Signature) + if (o0.signature() != op_data.op0_signature || + o1.signature() != op_data.op1_signature) goto InvalidInstruction; - opcode.reset(opData.opcode()); + opcode.reset(op_data.opcode()); goto EmitOp_Rd0_Rn5; } @@ -3608,21 +3708,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVV: { - const InstDB::EncodingData::ISimdVVV& opData = InstDB::EncodingData::iSimdVVV[encodingIndex]; + const InstDB::EncodingData::ISimdVVV& op_data = InstDB::EncodingData::iSimdVVV[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - if (!matchSignature(o0, o1, o2, instFlags)) + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(op_data.opcode()); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } @@ 
-3630,15 +3730,15 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVVx: { - const InstDB::EncodingData::ISimdVVVx& opData = InstDB::EncodingData::iSimdVVVx[encodingIndex]; + const InstDB::EncodingData::ISimdVVVx& op_data = InstDB::EncodingData::iSimdVVVx[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (o0.signature() != opData.op0Signature || - o1.signature() != opData.op1Signature || - o2.signature() != opData.op2Signature) + if (o0.signature() != op_data.op0_signature || + o1.signature() != op_data.op1_signature || + o2.signature() != op_data.op2_signature) goto InvalidInstruction; - opcode.reset(opData.opcode()); + opcode.reset(op_data.opcode()); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3647,20 +3747,20 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingISimdWWV: { // Special case for wide add/sub [s|b][add|sub][w]{2}. - const InstDB::EncodingData::ISimdWWV& opData = InstDB::EncodingData::iSimdWWV[encodingIndex]; + const InstDB::EncodingData::ISimdWWV& op_data = InstDB::EncodingData::iSimdWWV[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o2.as().type(), o2.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o2.as().reg_type(), o2.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (!checkSignature(o0, o1) || !o0.as().isVecV() || uint32_t(o0.as().elementType()) != uint32_t(o2.as().elementType()) + 1u) + if (!check_signature(o0, o1) || !o0.as().is_vec128() || uint32_t(o0.as().element_type()) != uint32_t(o2.as().element_type()) + 1u) goto InvalidInstruction; - opcode.reset(opData.opcode()); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(op_data.opcode()); + opcode.add_imm(size_op.qs(), 30); + 
opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3668,46 +3768,46 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVVe: { - const InstDB::EncodingData::ISimdVVVe& opData = InstDB::EncodingData::iSimdVVVe[encodingIndex]; + const InstDB::EncodingData::ISimdVVVe& op_data = InstDB::EncodingData::iSimdVVVe[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - if (!matchSignature(o0, o1, instFlags)) + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - if (!o2.as().hasElementIndex()) { - SizeOp sizeOp = armElementTypeToSizeOp(opData.regularVecType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + if (!o2.as().has_element_index()) { + SizeOp size_op = element_type_to_size_op(op_data.regular_vec_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (!checkSignature(o1, o2)) + if (!check_signature(o1, o2)) goto InvalidInstruction; - opcode.reset(uint32_t(opData.regularOp) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.regular_op) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } else { - SizeOp sizeOp = armElementTypeToSizeOp(opData.elementVecType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.element_vec_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - uint32_t elementIndex = o2.as().elementIndex(); + uint32_t element_index = o2.as().element_index(); LMHImm lmh; - if (!encodeLMH(sizeOp.size(), 
elementIndex, &lmh)) + if (!encode_lmh(size_op.size(), element_index, Out(lmh))) goto InvalidElementIndex; - if (o2.as().id() > lmh.maxRmId) + if (o2.as().id() > lmh.max_rm_id) goto InvalidPhysId; - opcode.reset(uint32_t(opData.elementOp) << 10); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(sizeOp.size(), 22); - opcode.addImm(lmh.lm, 20); - opcode.addImm(lmh.h, 11); + opcode.reset(uint32_t(op_data.element_op) << 10); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(size_op.size(), 22); + opcode.add_imm(lmh.lm, 20); + opcode.add_imm(lmh.h, 11); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -3716,32 +3816,32 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVVI: { - const InstDB::EncodingData::ISimdVVVI& opData = InstDB::EncodingData::iSimdVVVI[encodingIndex]; + const InstDB::EncodingData::ISimdVVVI& op_data = InstDB::EncodingData::iSimdVVVI[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) { - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - if (!matchSignature(o0, o1, o2, instFlags)) + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - uint64_t immValue = o3.as().valueAs(); - uint32_t immSize = opData.immSize; + uint64_t imm_value = o3.as().value_as(); + uint32_t imm_size = op_data.imm_size; - if (opData.imm64HasOneBitLess && !sizeOp.q()) - immSize--; + if (op_data.imm64_has_one_bit_less && !size_op.q()) + imm_size--; - uint32_t immMax = 1u << immSize; - if (immValue >= immMax) + uint32_t immMax = 1u << imm_size; + if (imm_value >= immMax) goto InvalidImmediate; - opcode.reset(opData.opcode()); - 
opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); - opcode.addImm(immValue, opData.immShift); + opcode.reset(op_data.opcode()); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); + opcode.add_imm(imm_value, op_data.imm_shift); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3749,21 +3849,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVVV: { - const InstDB::EncodingData::ISimdVVVV& opData = InstDB::EncodingData::iSimdVVVV[encodingIndex]; + const InstDB::EncodingData::ISimdVVVV& op_data = InstDB::EncodingData::iSimdVVVV[encoding_index]; if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) { - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - if (!matchSignature(o0, o1, o2, o3, instFlags)) + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + if (!match_signature(o0, o1, o2, o3, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, sop.as().type(), sop.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, sop.as().reg_type(), sop.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16_Ra10; } @@ -3771,16 +3871,16 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingISimdVVVVx: { - const InstDB::EncodingData::ISimdVVVVx& opData = InstDB::EncodingData::iSimdVVVVx[encodingIndex]; + const InstDB::EncodingData::ISimdVVVVx& op_data = InstDB::EncodingData::iSimdVVVVx[encoding_index]; 
if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) { - if (o0.signature() != opData.op0Signature || - o1.signature() != opData.op1Signature || - o2.signature() != opData.op2Signature || - o3.signature() != opData.op3Signature) + if (o0.signature() != op_data.op0_signature || + o1.signature() != op_data.op1_signature || + o2.signature() != op_data.op2_signature || + o3.signature() != op_data.op3_signature) goto InvalidInstruction; - opcode.reset(uint32_t(opData.opcode) << 10); + opcode.reset(uint32_t(op_data.opcode) << 10); goto EmitOp_Rd0_Rn5_Rm16_Ra10; } @@ -3789,28 +3889,28 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingISimdPair: { - const InstDB::EncodingData::ISimdPair& opData = InstDB::EncodingData::iSimdPair[encodingIndex]; + const InstDB::EncodingData::ISimdPair& op_data = InstDB::EncodingData::iSimdPair[encoding_index]; - if (isign4 == ENC_OPS2(Reg, Reg) && opData.opcode2) { - if (o0.as().isVecD1() && o1.as().isVecD2()) { - opcode.reset(uint32_t(opData.opcode2) << 10); - opcode.addImm(0x3, 22); // size. + if (isign4 == ENC_OPS2(Reg, Reg) && op_data.opcode2) { + if (o0.as().is_vec_d1() && o1.as().is_vec_d2()) { + opcode.reset(uint32_t(op_data.opcode2) << 10); + opcode.add_imm(0x3, 22); // size. 
goto EmitOp_Rd0_Rn5; } } if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.opType3, o0.as().type(), o0.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.op_type3, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(uint32_t(opData.opcode3) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.opcode3) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } @@ -3818,41 +3918,41 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdBicOrr: { - const InstDB::EncodingData::SimdBicOrr& opData = InstDB::EncodingData::simdBicOrr[encodingIndex]; + const InstDB::EncodingData::SimdBicOrr& op_data = InstDB::EncodingData::simdBicOrr[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(InstDB::kVO_V_B, o0.as().type(), o0.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(InstDB::kVO_V_B, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(uint32_t(opData.registerOp) << 10); - opcode.addImm(sizeOp.q(), 30); + opcode.reset(uint32_t(op_data.register_op) << 10); + opcode.add_imm(size_op.q(), 30); goto EmitOp_Rd0_Rn5_Rm16; } if (isign4 == ENC_OPS2(Reg, Imm) || isign4 == ENC_OPS3(Reg, Imm, Imm)) { - SizeOp sizeOp = armElementTypeToSizeOp(InstDB::kVO_V_HS, o0.as().type(), o0.as().elementType()); - if (!sizeOp.isValid()) + 
SizeOp size_op = element_type_to_size_op(InstDB::kVO_V_HS, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (o1.as().valueAs() > 0xFFFFFFFFu) + if (o1.as().value_as() > 0xFFFFFFFFu) goto InvalidImmediate; - uint32_t imm = o1.as().valueAs(); + uint32_t imm = o1.as().value_as(); uint32_t shift = 0; - uint32_t maxShift = (8u << sizeOp.size()) - 8u; + uint32_t max_shift = (8u << size_op.size()) - 8u; - if (o2.isImm()) { + if (o2.is_imm()) { if (o2.as().predicate() != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; - if (imm > 0xFFu || o2.as().valueAs() > maxShift) + if (imm > 0xFFu || o2.as().value_as() > max_shift) goto InvalidImmediate; - shift = o2.as().valueAs(); + shift = o2.as().value_as(); if ((shift & 0x7u) != 0u) goto InvalidImmediate; } @@ -3860,23 +3960,23 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co shift = Support::ctz(imm) & ~0x7u; imm >>= shift; - if (imm > 0xFFu || shift > maxShift) + if (imm > 0xFFu || shift > max_shift) goto InvalidImmediate; } uint32_t cmode = 0x1u | ((shift / 8u) << 1); - if (sizeOp.size() == 1) + if (size_op.size() == 1) cmode |= B(3); // The immediate value is split into ABC and DEFGH parts. 
uint32_t abc = (imm >> 5) & 0x7u; uint32_t defgh = imm & 0x1Fu; - opcode.reset(uint32_t(opData.immediateOp) << 10); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(abc, 16); - opcode.addImm(cmode, 12); - opcode.addImm(defgh, 5); + opcode.reset(uint32_t(op_data.immediate_op) << 10); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(abc, 16); + opcode.add_imm(cmode, 12); + opcode.add_imm(defgh, 5); goto EmitOp_Rd0; } @@ -3884,38 +3984,38 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdCmp: { - const InstDB::EncodingData::SimdCmp& opData = InstDB::EncodingData::simdCmp[encodingIndex]; + const InstDB::EncodingData::SimdCmp& op_data = InstDB::EncodingData::simdCmp[encoding_index]; - if (isign4 == ENC_OPS3(Reg, Reg, Reg) && opData.regOp) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (isign4 == ENC_OPS3(Reg, Reg, Reg) && op_data.register_op) { + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o0.as().type(), o0.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(uint32_t(opData.regOp) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.register_op) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.zeroOp) { - if (!matchSignature(o0, o1, instFlags)) + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.zero_op) { + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; if (o2.as().value() != 0) goto InvalidImmediate; - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o0.as().type(), 
o0.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - opcode.reset(uint32_t(opData.zeroOp) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.zero_op) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5; } @@ -3923,56 +4023,56 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdDot: { - const InstDB::EncodingData::SimdDot& opData = InstDB::EncodingData::simdDot[encodingIndex]; + const InstDB::EncodingData::SimdDot& op_data = InstDB::EncodingData::simdDot[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); uint32_t size = 2; if (q > 1u) goto InvalidInstruction; - if (!o2.as().hasElementIndex()) { - if (!opData.vectorOp) + if (!o2.as().has_element_index()) { + if (!op_data.vector_op) goto InvalidInstruction; - if (o0.as().type() != o1.as().type() || o1.as().type() != o2.as().type()) + if (o0.as().reg_type() != o1.as().reg_type() || o1.as().reg_type() != o2.as().reg_type()) goto InvalidInstruction; - if (uint32_t(o0.as().elementType()) != opData.tA || - uint32_t(o1.as().elementType()) != opData.tB || - uint32_t(o2.as().elementType()) != opData.tB) + if (uint32_t(o0.as().element_type()) != op_data.ta || + uint32_t(o1.as().element_type()) != op_data.tb || + uint32_t(o2.as().element_type()) != op_data.tb) goto InvalidInstruction; - opcode.reset(uint32_t(opData.vectorOp) << 10); - opcode.addImm(q, 30); + opcode.reset(uint32_t(op_data.vector_op) << 10); + opcode.add_imm(q, 30); goto EmitOp_Rd0_Rn5_Rm16; } else { - if (!opData.elementOp) + if 
(!op_data.element_op) goto InvalidInstruction; - if (o0.as().type() != o1.as().type() || !o2.as().isVecV()) + if (o0.as().reg_type() != o1.as().reg_type() || !o2.as().is_vec128()) goto InvalidInstruction; - if (uint32_t(o0.as().elementType()) != opData.tA || - uint32_t(o1.as().elementType()) != opData.tB || - uint32_t(o2.as().elementType()) != opData.tElement) + if (uint32_t(o0.as().element_type()) != op_data.ta || + uint32_t(o1.as().element_type()) != op_data.tb || + uint32_t(o2.as().element_type()) != op_data.tElement) goto InvalidInstruction; - uint32_t elementIndex = o2.as().elementIndex(); + uint32_t element_index = o2.as().element_index(); LMHImm lmh; - if (!encodeLMH(size, elementIndex, &lmh)) + if (!encode_lmh(size, element_index, Out(lmh))) goto InvalidElementIndex; - if (o2.as().id() > lmh.maxRmId) + if (o2.as().id() > lmh.max_rm_id) goto InvalidPhysId; - opcode.reset(uint32_t(opData.elementOp) << 10); - opcode.addImm(q, 30); - opcode.addImm(lmh.lm, 20); - opcode.addImm(lmh.h, 11); + opcode.reset(uint32_t(op_data.element_op) << 10); + opcode.add_imm(q, 30); + opcode.add_imm(lmh.lm, 20); + opcode.add_imm(lmh.h, 11); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -3991,60 +4091,60 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co B(uint32_t(VecElementType::kS) + 8) | B(uint32_t(VecElementType::kD) + 8) ; - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); - if (o1.as().isGp()) { + if (o1.as().is_gp()) { // DUP - Vec (scalar|vector) <- GP register. // // NOTE: This is only scalar for `dup d, x` case, otherwise the value // would be duplicated across all vector elements (1, 2, 4, 8, or 16). 
- uint32_t elementType = uint32_t(o0.as().elementType()); - if (q > 1 || !Support::bitTest(kValidEncodings, (q << 3) | elementType)) + uint32_t element_type = uint32_t(o0.as().element_type()); + if (q > 1 || !Support::bit_test(kValidEncodings, (q << 3) | element_type)) goto InvalidInstruction; - uint32_t lsbIndex = elementType - 1u; - uint32_t imm5 = 1u << lsbIndex; + uint32_t lsb_index = element_type - 1u; + uint32_t imm5 = 1u << lsb_index; opcode.reset(0b0000111000000000000011 << 10); - opcode.addImm(q, 30); - opcode.addImm(imm5, 16); + opcode.add_imm(q, 30); + opcode.add_imm(imm5, 16); goto EmitOp_Rd0_Rn5; } - if (!o1.as().isVec() || !o1.as().hasElementIndex()) + if (!o1.as().is_vec() || !o1.as().has_element_index()) goto InvalidInstruction; - uint32_t dstIndex = o1.as().elementIndex(); - if (!o0.as().hasElementType()) { + uint32_t dst_index = o1.as().element_index(); + if (!o0.as().has_element_type()) { // DUP - Vec (scalar) <- Vec[N]. - uint32_t lsbIndex = diff(o0.as().type(), RegType::kARM_VecB); + uint32_t lsb_index = diff(o0.as().reg_type(), RegType::kVec8); - if (lsbIndex != diff(o1.as().elementType(), VecElementType::kB) || lsbIndex > 3) + if (lsb_index != diff(o1.as().element_type(), VecElementType::kB) || lsb_index > 3) goto InvalidInstruction; - uint32_t imm5 = ((dstIndex << 1) | 1u) << lsbIndex; + uint32_t imm5 = ((dst_index << 1) | 1u) << lsb_index; if (imm5 > 31) goto InvalidElementIndex; opcode.reset(0b0101111000000000000001 << 10); - opcode.addImm(imm5, 16); + opcode.add_imm(imm5, 16); goto EmitOp_Rd0_Rn5; } else { // DUP - Vec (all) <- Vec[N]. 
- uint32_t elementType = uint32_t(o0.as().elementType()); - if (q > 1 || !Support::bitTest(kValidEncodings, (q << 3) | elementType)) + uint32_t element_type = uint32_t(o0.as().element_type()); + if (q > 1 || !Support::bit_test(kValidEncodings, (q << 3) | element_type)) goto InvalidInstruction; - uint32_t lsbIndex = elementType - 1u; - uint32_t imm5 = ((dstIndex << 1) | 1u) << lsbIndex; + uint32_t lsb_index = element_type - 1u; + uint32_t imm5 = ((dst_index << 1) | 1u) << lsb_index; if (imm5 > 31) goto InvalidElementIndex; opcode.reset(0b0000111000000000000001 << 10); - opcode.addImm(q, 30); - opcode.addImm(imm5, 16); + opcode.add_imm(q, 30); + opcode.add_imm(imm5, 16); goto EmitOp_Rd0_Rn5; } } @@ -4053,40 +4153,40 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdIns: SimdIns: { - if (isign4 == ENC_OPS2(Reg, Reg) && o0.as().isVecV()) { - if (!o0.as().hasElementIndex()) + if (isign4 == ENC_OPS2(Reg, Reg) && o0.as().is_vec128()) { + if (!o0.as().has_element_index()) goto InvalidInstruction; - uint32_t elementType = uint32_t(o0.as().elementType()); - uint32_t dstIndex = o0.as().elementIndex(); - uint32_t lsbIndex = elementType - 1u; + uint32_t element_type = uint32_t(o0.as().element_type()); + uint32_t dst_index = o0.as().element_index(); + uint32_t lsb_index = element_type - 1u; - uint32_t imm5 = ((dstIndex << 1) | 1u) << lsbIndex; + uint32_t imm5 = ((dst_index << 1) | 1u) << lsb_index; if (imm5 > 31) goto InvalidElementIndex; - if (o1.as().isGp()) { + if (o1.as().is_gp()) { // INS - Vec[N] <- GP register. opcode.reset(0b0100111000000000000111 << 10); - opcode.addImm(imm5, 16); + opcode.add_imm(imm5, 16); goto EmitOp_Rd0_Rn5; } - else if (o1.as().isVecV() && o1.as().hasElementIndex()) { + else if (o1.as().is_vec128() && o1.as().has_element_index()) { // INS - Vec[N] <- Vec[M]. 
- if (o0.as().elementType() != o1.as().elementType()) + if (o0.as().element_type() != o1.as().element_type()) goto InvalidInstruction; - uint32_t srcIndex = o1.as().elementIndex(); - if (o0.as().type() != o1.as().type()) + uint32_t src_index = o1.as().element_index(); + if (o0.as().reg_type() != o1.as().reg_type()) goto InvalidInstruction; - uint32_t imm4 = srcIndex << lsbIndex; + uint32_t imm4 = src_index << lsb_index; if (imm4 > 15) goto InvalidElementIndex; opcode.reset(0b0110111000000000000001 << 10); - opcode.addImm(imm5, 16); - opcode.addImm(imm4, 11); + opcode.add_imm(imm5, 16); + opcode.add_imm(imm4, 11); goto EmitOp_Rd0_Rn5; } } @@ -4096,40 +4196,40 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingSimdMov: { if (isign4 == ENC_OPS2(Reg, Reg)) { - if (o0.as().isVec() && o1.as().isVec()) { + if (o0.as().is_vec() && o1.as().is_vec()) { // INS v.x[index], v.x[index]. - if (o0.as().hasElementIndex() && o1.as().hasElementIndex()) + if (o0.as().has_element_index() && o1.as().has_element_index()) goto SimdIns; // DUP {b|h|s|d}, v.{b|h|s|d}[index]. - if (o1.as().hasElementIndex()) + if (o1.as().has_element_index()) goto SimdDup; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; // ORR Vd, Vn, Vm - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; opcode.reset(0b0000111010100000000111 << 10); - opcode.addImm(q, 30); - opcode.addReg(o1, 16); // Vn == Vm. + opcode.add_imm(q, 30); + opcode.add_reg(o1, 16); // Vn == Vm. goto EmitOp_Rd0_Rn5; } - if (o0.as().isVec() && o1.as().isGp()) { + if (o0.as().is_vec() && o1.as().is_gp()) { // INS v.x[index], Rn. - if (o0.as().hasElementIndex()) + if (o0.as().has_element_index()) goto SimdIns; goto InvalidInstruction; } - if (o0.as().isGp() && o1.as().isVec()) { + if (o0.as().is_gp() && o1.as().is_vec()) { // UMOV Rd, V.{s|d}[index]. 
- encodingIndex = 1; + encoding_index = 1; goto SimdUmov; } } @@ -4138,36 +4238,36 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdMoviMvni: { - const InstDB::EncodingData::SimdMoviMvni& opData = InstDB::EncodingData::simdMoviMvni[encodingIndex]; + const InstDB::EncodingData::SimdMoviMvni& op_data = InstDB::EncodingData::simdMoviMvni[encoding_index]; if (isign4 == ENC_OPS2(Reg, Imm) || isign4 == ENC_OPS3(Reg, Imm, Imm)) { - SizeOp sizeOp = armElementTypeToSizeOp(InstDB::kVO_V_Any, o0.as().type(), o0.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(InstDB::kVO_V_Any, o0.as().reg_type(), o0.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - uint64_t imm64 = o1.as().valueAs(); + uint64_t imm64 = o1.as().value_as(); uint32_t imm8 = 0; uint32_t cmode = 0; - uint32_t inverted = opData.inverted; + uint32_t inverted = op_data.inverted; uint32_t op = 0; uint32_t shift = 0; - uint32_t shiftOp = uint32_t(ShiftOp::kLSL); + uint32_t shift_op = uint32_t(ShiftOp::kLSL); - if (sizeOp.size() == 3u) { + if (size_op.size() == 3u) { // The second immediate should not be present, however, we accept // an immediate value of zero as some user code may still pass it. - if (o2.isImm() && o0.as().value() != 0) + if (o2.is_imm() && o0.as().value() != 0) goto InvalidImmediate; - if (Utils::isByteMaskImm8(imm64)) { - imm8 = Utils::encodeImm64ByteMaskToImm8(imm64); + if (Utils::is_byte_mask_imm(imm64)) { + imm8 = Utils::encode_imm64_byte_mask_to_imm8(imm64); } else { // Change from D to S and from 64-bit imm to 32-bit imm if this // is not a byte-mask pattern. 
if ((imm64 >> 32) == (imm64 & 0xFFFFFFFFu)) { imm64 &= 0xFFFFFFFFu; - sizeOp.decrementSize(); + size_op.decrement_size(); } else { goto InvalidImmediate; @@ -4175,41 +4275,41 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } } - if (sizeOp.size() < 3u) { + if (size_op.size() < 3u) { if (imm64 > 0xFFFFFFFFu) goto InvalidImmediate; imm8 = uint32_t(imm64); - if (sizeOp.size() == 2) { + if (size_op.size() == 2) { if ((imm8 >> 16) == (imm8 & 0xFFFFu)) { imm8 >>= 16; - sizeOp.decrementSize(); + size_op.decrement_size(); } } - if (sizeOp.size() == 1) { + if (size_op.size() == 1) { if (imm8 > 0xFFFFu) goto InvalidImmediate; if ((imm8 >> 8) == (imm8 & 0xFFu)) { imm8 >>= 8; - sizeOp.decrementSize(); + size_op.decrement_size(); } } - uint32_t maxShift = (8u << sizeOp.size()) - 8u; - if (o2.isImm()) { - if (imm8 > 0xFFu || o2.as().valueAs() > maxShift) + uint32_t max_shift = (8u << size_op.size()) - 8u; + if (o2.is_imm()) { + if (imm8 > 0xFFu || o2.as().value_as() > max_shift) goto InvalidImmediate; - shift = o2.as().valueAs(); - shiftOp = o2.as().predicate(); + shift = o2.as().value_as(); + shift_op = o2.as().predicate(); } else if (imm8) { shift = Support::ctz(imm8) & ~0x7u; imm8 >>= shift; - if (imm8 > 0xFFu || shift > maxShift) + if (imm8 > 0xFFu || shift > max_shift) goto InvalidImmediate; } @@ -4219,9 +4319,9 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co shift /= 8u; - switch (sizeOp.size()) { + switch (size_op.size()) { case 0: - if (shiftOp != uint32_t(ShiftOp::kLSL)) + if (shift_op != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; if (inverted) { @@ -4232,7 +4332,7 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co break; case 1: - if (shiftOp != uint32_t(ShiftOp::kLSL)) + if (shift_op != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; cmode = B(3) | (shift << 1); @@ -4240,10 +4340,10 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& 
o1, co break; case 2: - if (shiftOp == uint32_t(ShiftOp::kLSL)) { + if (shift_op == uint32_t(ShiftOp::kLSL)) { cmode = shift << 1; } - else if (shiftOp == uint32_t(ShiftOp::kMSL)) { + else if (shift_op == uint32_t(ShiftOp::kMSL)) { if (shift == 0 || shift > 2) goto InvalidImmediate; cmode = B(3) | B(2) | (shift - 1u); @@ -4269,12 +4369,12 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co uint32_t abc = (imm8 >> 5) & 0x7u; uint32_t defgh = imm8 & 0x1Fu; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(op, 29); - opcode.addImm(abc, 16); - opcode.addImm(cmode, 12); - opcode.addImm(defgh, 5); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(op, 29); + opcode.add_imm(abc, 16); + opcode.add_imm(cmode, 12); + opcode.add_imm(defgh, 5); goto EmitOp_Rd0; } @@ -4282,51 +4382,51 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdShift: { - const InstDB::EncodingData::SimdShift& opData = InstDB::EncodingData::simdShift[encodingIndex]; + const InstDB::EncodingData::SimdShift& op_data = InstDB::EncodingData::simdShift[encoding_index]; - const Operand_& sop = significantSimdOp(o0, o1, instFlags); - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, sop.as().type(), sop.as().elementType()); + const Operand_& sop = significant_simd_op(o0, o1, inst_flags); + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, sop.as().reg_type(), sop.as().element_type()); - if (!sizeOp.isValid()) + if (!size_op.is_valid()) goto InvalidInstruction; - if (isign4 == ENC_OPS3(Reg, Reg, Imm) && opData.immediateOp) { - if (!matchSignature(o0, o1, instFlags)) + if (isign4 == ENC_OPS3(Reg, Reg, Imm) && op_data.immediate_op) { + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - if (o2.as().valueAs() > 63) + if (o2.as().value_as() > 63) goto InvalidImmediate; - uint32_t lsbShift = 
sizeOp.size() + 3u; - uint32_t lsbMask = (1u << lsbShift) - 1u; - uint32_t imm = o2.as().valueAs(); + uint32_t lsb_shift = size_op.size() + 3u; + uint32_t lsb_mask = (1u << lsb_shift) - 1u; + uint32_t imm = o2.as().value_as(); // Some instructions use IMM and some X - IMM, so negate if required. - if (opData.invertedImm) { - if (imm == 0 || imm > (1u << lsbShift)) + if (op_data.inverted_imm) { + if (imm == 0 || imm > (1u << lsb_shift)) goto InvalidImmediate; - imm = Support::neg(imm) & lsbMask; + imm = Support::neg(imm) & lsb_mask; } - if (imm > lsbMask) + if (imm > lsb_mask) goto InvalidImmediate; - imm |= (1u << lsbShift); + imm |= (1u << lsb_shift); - opcode.reset(uint32_t(opData.immediateOp) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(imm, 16); + opcode.reset(uint32_t(op_data.immediate_op) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(imm, 16); goto EmitOp_Rd0_Rn5; } - if (isign4 == ENC_OPS3(Reg, Reg, Reg) && opData.registerOp) { - if (!matchSignature(o0, o1, o2, instFlags)) + if (isign4 == ENC_OPS3(Reg, Reg, Reg) && op_data.register_op) { + if (!match_signature(o0, o1, o2, inst_flags)) goto InvalidInstruction; - opcode.reset(uint32_t(opData.registerOp) << 10); - opcode.addImm(sizeOp.qs(), 30); - opcode.addImm(sizeOp.scalar(), 28); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.register_op) << 10); + opcode.add_imm(size_op.qs(), 30); + opcode.add_imm(size_op.scalar(), 28); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5_Rm16; } @@ -4334,26 +4434,26 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdShiftES: { - const InstDB::EncodingData::SimdShiftES& opData = InstDB::EncodingData::simdShiftES[encodingIndex]; + const InstDB::EncodingData::SimdShiftES& op_data = InstDB::EncodingData::simdShiftES[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Imm)) { - SizeOp 
sizeOp = armElementTypeToSizeOp(opData.vecOpType, o1.as().type(), o1.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o1.as().reg_type(), o1.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (!matchSignature(o0, o1, instFlags)) + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; // The immediate value must match the element size. - uint64_t shift = o2.as().valueAs(); - uint32_t shiftOp = o2.as().predicate(); + uint64_t shift = o2.as().value_as(); + uint32_t shift_op = o2.as().predicate(); - if (shift != (8u << sizeOp.size()) || shiftOp != uint32_t(ShiftOp::kLSL)) + if (shift != (8u << size_op.size()) || shift_op != uint32_t(ShiftOp::kLSL)) goto InvalidImmediate; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(sizeOp.size(), 22); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(size_op.size(), 22); goto EmitOp_Rd0_Rn5; } @@ -4361,16 +4461,16 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdSm3tt: { - const InstDB::EncodingData::SimdSm3tt& opData = InstDB::EncodingData::simdSm3tt[encodingIndex]; + const InstDB::EncodingData::SimdSm3tt& op_data = InstDB::EncodingData::simdSm3tt[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg)) { - if (o0.as().isVecS4() && o1.as().isVecS4() && o2.as().isVecS4() && o2.as().hasElementIndex()) { - uint32_t imm2 = o2.as().elementIndex(); + if (o0.as().is_vec_s4() && o1.as().is_vec_s4() && o2.as().is_vec_s4() && o2.as().has_element_index()) { + uint32_t imm2 = o2.as().element_index(); if (imm2 > 3) goto InvalidElementIndex; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(imm2, 12); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(imm2, 12); goto EmitOp_Rd0_Rn5_Rm16; } } @@ -4380,39 +4480,39 @@ Error Assembler::_emit(InstId instId, const 
Operand_& o0, const Operand_& o1, co case InstDB::kEncodingSimdSmovUmov: SimdUmov: { - const InstDB::EncodingData::SimdSmovUmov& opData = InstDB::EncodingData::simdSmovUmov[encodingIndex]; + const InstDB::EncodingData::SimdSmovUmov& op_data = InstDB::EncodingData::simdSmovUmov[encoding_index]; - if (isign4 == ENC_OPS2(Reg, Reg) && o0.as().isGp() && o1.as().isVec()) { - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o1.as().type(), o1.as().elementType()); - if (!sizeOp.isValid()) + if (isign4 == ENC_OPS2(Reg, Reg) && o0.as().is_gp() && o1.as().is_vec()) { + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o1.as().reg_type(), o1.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (!o1.as().hasElementIndex()) + if (!o1.as().has_element_index()) goto InvalidInstruction; - uint32_t x = o0.as().isGpX(); - uint32_t gpMustBeX = uint32_t(sizeOp.size() >= 3u - opData.isSigned); + uint32_t x = o0.as().is_gp64(); + uint32_t gp_must_be_x = uint32_t(size_op.size() >= 3u - op_data.is_signed); - if (opData.isSigned) { - if (gpMustBeX && !x) + if (op_data.is_signed) { + if (gp_must_be_x && !x) goto InvalidInstruction; } else { - if (x != gpMustBeX) + if (x != gp_must_be_x) goto InvalidInstruction; } - uint32_t elementIndex = o1.as().elementIndex(); - uint32_t maxElementIndex = 15u >> sizeOp.size(); + uint32_t element_index = o1.as().element_index(); + uint32_t max_element_index = 15u >> size_op.size(); - if (elementIndex > maxElementIndex) + if (element_index > max_element_index) goto InvalidElementIndex; - uint32_t imm5 = (1u | (elementIndex << 1)) << sizeOp.size(); + uint32_t imm5 = (1u | (element_index << 1)) << size_op.size(); - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(x, 30); - opcode.addImm(imm5, 16); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(x, 30); + opcode.add_imm(imm5, 16); goto EmitOp_Rd0_Rn5; } @@ -4420,19 +4520,19 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, 
const Operand_& o1, co } case InstDB::kEncodingSimdSxtlUxtl: { - const InstDB::EncodingData::SimdSxtlUxtl& opData = InstDB::EncodingData::simdSxtlUxtl[encodingIndex]; + const InstDB::EncodingData::SimdSxtlUxtl& op_data = InstDB::EncodingData::simdSxtlUxtl[encoding_index]; if (isign4 == ENC_OPS2(Reg, Reg)) { - SizeOp sizeOp = armElementTypeToSizeOp(opData.vecOpType, o1.as().type(), o1.as().elementType()); - if (!sizeOp.isValid()) + SizeOp size_op = element_type_to_size_op(op_data.vec_op_type, o1.as().reg_type(), o1.as().element_type()); + if (!size_op.is_valid()) goto InvalidInstruction; - if (!matchSignature(o0, o1, instFlags)) + if (!match_signature(o0, o1, inst_flags)) goto InvalidInstruction; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(sizeOp.q(), 30); - opcode.addImm(1u, sizeOp.size() + 19); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(size_op.q(), 30); + opcode.add_imm(1u, size_op.size() + 19); goto EmitOp_Rd0_Rn5; } @@ -4440,68 +4540,68 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdTblTbx: { - const InstDB::EncodingData::SimdTblTbx& opData = InstDB::EncodingData::simdTblTbx[encodingIndex]; + const InstDB::EncodingData::SimdTblTbx& op_data = InstDB::EncodingData::simdTblTbx[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Reg) || isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) { // TBL/TBX ., { .16B }, . // TBL/TBX ., { .16B, .16B }, . // TBL/TBX ., { .16B, .16B, .16B }, . // TBL/TBX ., { .16B, .16B, .16B, .16B }, . 
- opcode.reset(uint32_t(opData.opcode) << 10); + opcode.reset(uint32_t(op_data.opcode) << 10); - const Operand_& o4 = opExt[EmitterUtils::kOp4]; - const Operand_& o5 = opExt[EmitterUtils::kOp5]; + const Operand_& o4 = op_ext[EmitterUtils::kOp4]; + const Operand_& o5 = op_ext[EmitterUtils::kOp5]; - uint32_t q = diff(o0.as().type(), RegType::kARM_VecD); - if (q > 1 || o0.as().hasElementIndex()) + uint32_t q = diff(o0.as().reg_type(), RegType::kVec64); + if (q > 1 || o0.as().has_element_index()) goto InvalidInstruction; - if (!o1.as().isVecB16() || o1.as().hasElementIndex()) + if (!o1.as().is_vec_b16() || o1.as().has_element_index()) goto InvalidInstruction; - uint32_t len = uint32_t(!o3.isNone()) + uint32_t(!o4.isNone()) + uint32_t(!o5.isNone()); - opcode.addImm(q, 30); - opcode.addImm(len, 13); + uint32_t len = uint32_t(!o3.is_none()) + uint32_t(!o4.is_none()) + uint32_t(!o5.is_none()); + opcode.add_imm(q, 30); + opcode.add_imm(len, 13); switch (len) { case 0: - if (!checkSignature(o0, o2)) + if (!check_signature(o0, o2)) goto InvalidInstruction; if (o2.id() > 31) goto InvalidPhysId; - opcode.addReg(o2, 16); + opcode.add_reg(o2, 16); goto EmitOp_Rd0_Rn5; case 1: - if (!checkSignature(o0, o3)) + if (!check_signature(o0, o3)) goto InvalidInstruction; if (o3.id() > 31) goto InvalidPhysId; - opcode.addReg(o3, 16); + opcode.add_reg(o3, 16); goto EmitOp_Rd0_Rn5; case 2: - if (!checkSignature(o0, o4)) + if (!check_signature(o0, o4)) goto InvalidInstruction; if (o4.id() > 31) goto InvalidPhysId; - opcode.addReg(o4, 16); + opcode.add_reg(o4, 16); goto EmitOp_Rd0_Rn5; case 3: - if (!checkSignature(o0, o5)) + if (!check_signature(o0, o5)) goto InvalidInstruction; if (o5.id() > 31) goto InvalidPhysId; - opcode.addReg(o5, 16); + opcode.add_reg(o5, 16); goto EmitOp_Rd0_Rn5; default: @@ -4518,11 +4618,11 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // ------------------------------------------------------------------------ case 
InstDB::kEncodingSimdLdSt: { - const InstDB::EncodingData::SimdLdSt& opData = InstDB::EncodingData::simdLdSt[encodingIndex]; + const InstDB::EncodingData::SimdLdSt& op_data = InstDB::EncodingData::simdLdSt[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; // Width | SZ | XY | XSZ // -------+----------+-----------+----- @@ -4531,21 +4631,21 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // 32-bit | size==10 | opc == 01 | 010 // 64-bit | size==11 | opc == 01 | 011 // 128-bit| size==00 | opc == 11 | 100 - uint32_t xsz = diff(o0.as().type(), RegType::kARM_VecB); - if (xsz > 4u || o0.as().hasElementIndex()) + uint32_t xsz = diff(o0.as().reg_type(), RegType::kVec8); + if (xsz > 4u || o0.as().has_element_index()) goto InvalidRegType; - if (!checkVecId(o0)) + if (!check_vec_id(o0)) goto InvalidPhysId; - if (!armCheckMemBaseIndexRel(m)) + if (!check_mem_base_index_rel(m)) goto InvalidAddress; int64_t offset = m.offset(); - if (m.hasBaseReg()) { + if (m.has_base_reg()) { // [Base {Offset | Index}] - if (m.hasIndex()) { - uint32_t opt = armShiftOpToLdStOptMap[size_t(m.shiftOp())]; + if (m.has_index()) { + uint32_t opt = shift_op_to_ld_st_opt_map[size_t(m.shift_op())]; if (opt == 0xFFu) goto InvalidAddress; @@ -4555,65 +4655,65 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co if (s && shift != xsz) goto InvalidAddressScale; - opcode.reset(uint32_t(opData.registerOp) << 21); - opcode.addImm(xsz & 3u, 30); - opcode.addImm(xsz >> 2, 23); - opcode.addImm(opt, 13); - opcode.addImm(s, 12); + opcode.reset(uint32_t(op_data.register_op) << 21); + opcode.add_imm(xsz & 3u, 30); + opcode.add_imm(xsz >> 2, 23); + opcode.add_imm(opt, 13); + opcode.add_imm(s, 12); opcode |= B(11); - opcode.addReg(o0, 0); + opcode.add_reg(o0, 0); goto EmitOp_MemBaseIndex_Rn5_Rm16; } // Makes it easier to work with the offset especially on 32-bit arch. 
- if (!Support::isInt32(offset)) + if (!Support::is_int_n<32>(offset)) goto InvalidDisplacement; int32_t offset32 = int32_t(offset); - if (m.isPreOrPost()) { - if (!Support::isInt9(offset32)) + if (m.is_pre_or_post()) { + if (!Support::is_int_n<9>(offset32)) goto InvalidDisplacement; - opcode.reset(uint32_t(opData.prePostOp) << 21); - opcode.addImm(xsz & 3u, 30); - opcode.addImm(xsz >> 2, 23); - opcode.addImm(offset32 & 0x1FF, 12); - opcode.addImm(m.isPreIndex(), 11); + opcode.reset(uint32_t(op_data.pre_post_op) << 21); + opcode.add_imm(xsz & 3u, 30); + opcode.add_imm(xsz >> 2, 23); + opcode.add_imm(offset32 & 0x1FF, 12); + opcode.add_imm(m.is_pre_index(), 11); opcode |= B(10); - opcode.addReg(o0, 0); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } else { uint32_t imm12 = uint32_t(offset32) >> xsz; // If this instruction is not encodable with scaled unsigned offset, try unscaled signed offset. - if (!Support::isUInt12(imm12) || (imm12 << xsz) != uint32_t(offset32)) { - instId = opData.uAltInstId; - instInfo = &InstDB::_instInfoTable[instId]; - encodingIndex = instInfo->_encodingDataIndex; + if (!Support::is_uint_n<12>(imm12) || (imm12 << xsz) != uint32_t(offset32)) { + inst_id = op_data.u_alt_inst_id; + inst_info = &InstDB::_inst_info_table[inst_id]; + encoding_index = inst_info->_encoding_data_index; goto Case_SimdLdurStur; } - opcode.reset(uint32_t(opData.uOffsetOp) << 22); - opcode.addImm(xsz & 3u, 30); - opcode.addImm(xsz >> 2, 23); - opcode.addImm(imm12, 10); - opcode.addReg(o0, 0); + opcode.reset(uint32_t(op_data.u_offset_op) << 22); + opcode.add_imm(xsz & 3u, 30); + opcode.add_imm(xsz >> 2, 23); + opcode.add_imm(imm12, 10); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } } else { - if (!opData.literalOp) + if (!op_data.literal_op) goto InvalidAddress; if (xsz < 2u) goto InvalidRegType; uint32_t opc = xsz - 2u; - opcode.reset(uint32_t(opData.literalOp) << 24); - opcode.addImm(opc, 30); - opcode.addReg(o0, 0); - 
offsetFormat.resetToImmValue(OffsetType::kSignedOffset, 4, 5, 19, 2); + opcode.reset(uint32_t(op_data.literal_op) << 24); + opcode.add_imm(opc, 30); + opcode.add_reg(o0, 0); + offset_format.reset_to_imm_value(OffsetType::kSignedOffset, 4, 5, 19, 2); goto EmitOp_Rel; } } @@ -4622,54 +4722,54 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdLdpStp: { - const InstDB::EncodingData::SimdLdpStp& opData = InstDB::EncodingData::simdLdpStp[encodingIndex]; + const InstDB::EncodingData::SimdLdpStp& op_data = InstDB::EncodingData::simdLdpStp[encoding_index]; if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { const Mem& m = o2.as(); - rmRel = &m; + rm_rel = &m; - uint32_t opc = diff(o0.as().type(), RegType::kARM_VecS); - if (opc > 2u || o0.as().hasElementTypeOrIndex()) + uint32_t opc = diff(o0.as().reg_type(), RegType::kVec32); + if (opc > 2u || o0.as().has_element_type_or_index()) goto InvalidInstruction; - if (!checkSignature(o0, o1)) + if (!check_signature(o0, o1)) goto InvalidInstruction; - if (!checkVecId(o0, o1)) + if (!check_vec_id(o0, o1)) goto InvalidPhysId; - if (m.baseType() != RegType::kARM_GpX || m.hasIndex()) + if (m.base_type() != RegType::kGp64 || m.has_index()) goto InvalidAddress; - if (m.isOffset64Bit()) + if (m.is_offset_64bit()) goto InvalidDisplacement; - uint32_t offsetShift = 2u + opc; - int32_t offset32 = m.offsetLo32() >> offsetShift; + uint32_t offset_shift = 2u + opc; + int32_t offset32 = m.offset_lo32() >> offset_shift; // Make sure we didn't lose bits by applying the mandatory offset shift. - if (Support::shl(offset32, offsetShift) != m.offsetLo32()) + if (Support::shl(offset32, offset_shift) != m.offset_lo32()) goto InvalidDisplacement; // Offset is encoded as a 7-bit immediate. 
- if (!Support::isInt7(offset32)) + if (!Support::is_int_n<7>(offset32)) goto InvalidDisplacement; - if (m.isPreOrPost() && offset32 != 0) { - if (!opData.prePostOp) + if (m.is_pre_or_post() && offset32 != 0) { + if (!op_data.pre_post_op) goto InvalidAddress; - opcode.reset(uint32_t(opData.prePostOp) << 22); - opcode.addImm(m.isPreIndex(), 24); + opcode.reset(uint32_t(op_data.pre_post_op) << 22); + opcode.add_imm(m.is_pre_index(), 24); } else { - opcode.reset(uint32_t(opData.offsetOp) << 22); + opcode.reset(uint32_t(op_data.offset_op) << 22); } - opcode.addImm(opc, 30); - opcode.addImm(offset32 & 0x7F, 15); - opcode.addReg(o1, 10); - opcode.addReg(o0, 0); + opcode.add_imm(opc, 30); + opcode.add_imm(offset32 & 0x7F, 15); + opcode.add_reg(o1, 10); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } @@ -4678,35 +4778,35 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co case InstDB::kEncodingSimdLdurStur: { Case_SimdLdurStur: - const InstDB::EncodingData::SimdLdurStur& opData = InstDB::EncodingData::simdLdurStur[encodingIndex]; + const InstDB::EncodingData::SimdLdurStur& op_data = InstDB::EncodingData::simdLdurStur[encoding_index]; if (isign4 == ENC_OPS2(Reg, Mem)) { const Mem& m = o1.as(); - rmRel = &m; + rm_rel = &m; - uint32_t sz = diff(o0.as().type(), RegType::kARM_VecB); - if (sz > 4 || o0.as().hasElementTypeOrIndex()) + uint32_t sz = diff(o0.as().reg_type(), RegType::kVec8); + if (sz > 4 || o0.as().has_element_type_or_index()) goto InvalidInstruction; - if (!checkVecId(o0)) + if (!check_vec_id(o0)) goto InvalidPhysId; - if (!armCheckMemBaseIndexRel(m)) + if (!check_mem_base_index_rel(m)) goto InvalidAddress; - if (m.hasBaseReg() && !m.hasIndex() && !m.isPreOrPost()) { - if (m.isOffset64Bit()) + if (m.has_base_reg() && !m.has_index() && !m.is_pre_or_post()) { + if (m.is_offset_64bit()) goto InvalidDisplacement; - int32_t offset32 = m.offsetLo32(); - if (!Support::isInt9(offset32)) + int32_t offset32 = m.offset_lo32(); + if 
(!Support::is_int_n<9>(offset32)) goto InvalidDisplacement; - opcode.reset(uint32_t(opData.opcode) << 10); - opcode.addImm(sz & 3u, 30); - opcode.addImm(sz >> 2, 23); - opcode.addImm(offset32 & 0x1FF, 12); - opcode.addReg(o0, 0); + opcode.reset(uint32_t(op_data.opcode) << 10); + opcode.add_imm(sz & 3u, 30); + opcode.add_imm(sz >> 2, 23); + opcode.add_imm(offset32 & 0x1FF, 12); + opcode.add_reg(o0, 0); goto EmitOp_MemBase_Rn5; } @@ -4717,46 +4817,46 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } case InstDB::kEncodingSimdLdNStN: { - const InstDB::EncodingData::SimdLdNStN& opData = InstDB::EncodingData::simdLdNStN[encodingIndex]; - const Operand_& o4 = opExt[EmitterUtils::kOp4]; + const InstDB::EncodingData::SimdLdNStN& op_data = InstDB::EncodingData::simdLdNStN[encoding_index]; + const Operand_& o4 = op_ext[EmitterUtils::kOp4]; uint32_t n = 1; if (isign4 == ENC_OPS2(Reg, Mem)) { - if (opData.n != 1) + if (op_data.n != 1) goto InvalidInstruction; - rmRel = &o1; + rm_rel = &o1; } else if (isign4 == ENC_OPS3(Reg, Reg, Mem)) { - if (opData.n != 1 && opData.n != 2) + if (op_data.n != 1 && op_data.n != 2) goto InvalidInstruction; - if (!checkSignature(o0, o1) || !checkConsecutive(o0, o1)) + if (!check_signature(o0, o1) || !check_consecutive(o0, o1)) goto InvalidInstruction; n = 2; - rmRel = &o2; + rm_rel = &o2; } - else if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem) && o4.isNone()) { - if (opData.n != 1 && opData.n != 3) + else if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem) && o4.is_none()) { + if (op_data.n != 1 && op_data.n != 3) goto InvalidInstruction; - if (!checkSignature(o0, o1, o2) || !checkConsecutive(o0, o1, o2)) + if (!check_signature(o0, o1, o2) || !check_consecutive(o0, o1, o2)) goto InvalidInstruction; n = 3; - rmRel = &o3; + rm_rel = &o3; } - else if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg) && o4.isMem()) { - if (opData.n != 1 && opData.n != 4) + else if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg) && o4.is_mem()) { + if (op_data.n 
!= 1 && op_data.n != 4) goto InvalidInstruction; - if (!checkSignature(o0, o1, o2, o3) || !checkConsecutive(o0, o1, o2, o3)) + if (!check_signature(o0, o1, o2, o3) || !check_consecutive(o0, o1, o2, o3)) goto InvalidInstruction; n = 4; - rmRel = &o4; + rm_rel = &o4; } else { goto InvalidInstruction; @@ -4764,19 +4864,19 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // We will use `v` and `m` from now as those are relevant for encoding. const Vec& v = o0.as(); - const Mem& m = rmRel->as(); + const Mem& m = rm_rel->as(); uint32_t q = 0; uint32_t rm = 0; - uint32_t rn = m.baseId(); - uint32_t sz = diff(v.elementType(), VecElementType::kB); - uint32_t opcSsize = sz; - uint32_t offsetPossibility = 0; + uint32_t rn = m.base_id(); + uint32_t sz = diff(v.element_type(), VecElementType::kB); + uint32_t opc_s_size = sz; + uint32_t offset_possibility = 0; if (sz > 3) goto InvalidInstruction; - if (m.baseType() != RegType::kARM_GpX) + if (m.base_type() != RegType::kGp64) goto InvalidAddress; // Rn cannot be ZR, but can be SP. @@ -4785,62 +4885,62 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co rn &= 31; - if (opData.replicate) { - if (n != opData.n) + if (op_data.replicate) { + if (n != op_data.n) goto InvalidInstruction; // Replicates to the whole register, element index cannot be used. - if (v.hasElementIndex()) + if (v.has_element_index()) goto InvalidInstruction; - q = diff(v.type(), RegType::kARM_VecD); + q = diff(v.reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - opcode.reset(uint32_t(opData.singleOp) << 10); - offsetPossibility = (1u << sz) * n; + opcode.reset(uint32_t(op_data.single_op) << 10); + offset_possibility = (1u << sz) * n; } - else if (v.hasElementIndex()) { - if (n != opData.n) + else if (v.has_element_index()) { + if (n != op_data.n) goto InvalidInstruction; // LDx/STx (single structure). 
- static const uint8_t opcSsizeBySzS[] = { 0x0u << 3, 0x2u << 3, 0x4u << 3, (0x4u << 3) | 1u }; + static const uint8_t opc_s_size_by_sz_table[] = { 0x0u << 3, 0x2u << 3, 0x4u << 3, (0x4u << 3) | 1u }; - opcode.reset(uint32_t(opData.singleOp) << 10); - opcSsize = opcSsizeBySzS[sz]; - offsetPossibility = (1u << sz) * opData.n; + opcode.reset(uint32_t(op_data.single_op) << 10); + opc_s_size = opc_s_size_by_sz_table[sz]; + offset_possibility = (1u << sz) * op_data.n; - uint32_t elementIndex = v.elementIndex(); - uint32_t maxElementIndex = 15 >> sz; + uint32_t element_index = v.element_index(); + uint32_t max_element_index = 15 >> sz; - if (elementIndex > maxElementIndex) + if (element_index > max_element_index) goto InvalidElementIndex; - elementIndex <<= sz; - q = elementIndex >> 3; - opcSsize |= elementIndex & 0x7u; + element_index <<= sz; + q = element_index >> 3; + opc_s_size |= element_index & 0x7u; } else { // LDx/STx (multiple structures). - static const uint8_t opcSsizeByN[] = { 0u, 0x7u << 2, 0xAu << 2, 0x6u << 2, 0x2u << 2 }; + static const uint8_t opc_s_size_by_n_table[] = { 0u, 0x7u << 2, 0xAu << 2, 0x6u << 2, 0x2u << 2 }; - q = diff(v.type(), RegType::kARM_VecD); + q = diff(v.reg_type(), RegType::kVec64); if (q > 1) goto InvalidInstruction; - if (opData.n == 1) - opcSsize |= opcSsizeByN[n]; + if (op_data.n == 1) + opc_s_size |= opc_s_size_by_n_table[n]; - opcode.reset(uint32_t(opData.multipleOp) << 10); - offsetPossibility = (8u << q) * n; + opcode.reset(uint32_t(op_data.multiple_op) << 10); + offset_possibility = (8u << q) * n; } - if (m.hasIndex()) { - if (m.hasOffset() || !m.isPostIndex()) + if (m.has_index()) { + if (m.has_offset() || !m.is_post_index()) goto InvalidAddress; - rm = m.indexId(); + rm = m.index_id(); if (rm > 30) goto InvalidAddress; @@ -4848,8 +4948,8 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co opcode |= B(23); } else { - if (m.hasOffset()) { - if (m.offset() != int32_t(offsetPossibility) || 
!m.isPostIndex()) + if (m.has_offset()) { + if (m.offset() != int32_t(offset_possibility) || !m.is_post_index()) goto InvalidAddress; rm = 31; @@ -4858,10 +4958,10 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co } } - opcode.addImm(q, 30); - opcode.addImm(rm, 16); - opcode.addImm(opcSsize, 10); - opcode.addImm(rn, 5); + opcode.add_imm(q, 30); + opcode.add_imm(rm, 16); + opcode.add_imm(opc_s_size, 10); + opcode.add_imm(rn, 5); goto EmitOp_Rd0; } @@ -4876,52 +4976,52 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // -------------------------------------------------------------------------- EmitOp_Rd0: - if (!checkValidRegs(o0)) + if (!check_valid_regs(o0)) goto InvalidPhysId; - opcode.addReg(o0, 0); + opcode.add_reg(o0, 0); goto EmitOp; EmitOp_Rn5: - if (!checkValidRegs(o0)) + if (!check_valid_regs(o0)) goto InvalidPhysId; - opcode.addReg(o0, 5); + opcode.add_reg(o0, 5); goto EmitOp; EmitOp_Rn5_Rm16: - if (!checkValidRegs(o0, o1)) + if (!check_valid_regs(o0, o1)) goto InvalidPhysId; - opcode.addReg(o0, 5); - opcode.addReg(o1, 16); + opcode.add_reg(o0, 5); + opcode.add_reg(o1, 16); goto EmitOp; EmitOp_Rd0_Rn5: - if (!checkValidRegs(o0, o1)) + if (!check_valid_regs(o0, o1)) goto InvalidPhysId; - opcode.addReg(o0, 0); - opcode.addReg(o1, 5); + opcode.add_reg(o0, 0); + opcode.add_reg(o1, 5); goto EmitOp; EmitOp_Rd0_Rn5_Rm16_Ra10: - if (!checkValidRegs(o0, o1, o2, o3)) + if (!check_valid_regs(o0, o1, o2, o3)) goto InvalidPhysId; - opcode.addReg(o0, 0); - opcode.addReg(o1, 5); - opcode.addReg(o2, 16); - opcode.addReg(o3, 10); + opcode.add_reg(o0, 0); + opcode.add_reg(o1, 5); + opcode.add_reg(o2, 16); + opcode.add_reg(o3, 10); goto EmitOp; EmitOp_Rd0_Rn5_Rm16: - if (!checkValidRegs(o0, o1, o3)) + if (!check_valid_regs(o0, o1, o3)) goto InvalidPhysId; - opcode.addReg(o0, 0); - opcode.addReg(o1, 5); - opcode.addReg(o2, 16); + opcode.add_reg(o0, 0); + opcode.add_reg(o1, 5); + opcode.add_reg(o2, 16); goto 
EmitOp; // -------------------------------------------------------------------------- @@ -4930,13 +5030,15 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co EmitOp_Multiple: { - ASMJIT_ASSERT(multipleOpCount > 0); - err = writer.ensureSpace(this, multipleOpCount * 4u); - if (ASMJIT_UNLIKELY(err)) + ASMJIT_ASSERT(multiple_op_count > 0); + err = writer.ensure_space(this, multiple_op_count * 4u); + if (ASMJIT_UNLIKELY(err != Error::kOk)) { goto Failed; + } - for (uint32_t i = 0; i < multipleOpCount; i++) - writer.emit32uLE(multipleOpData[i]); + for (uint32_t i = 0; i < multiple_op_count; i++) { + writer.emit32u_le(multiple_op_data[i]); + } goto EmitDone; } @@ -4946,31 +5048,36 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co // -------------------------------------------------------------------------- EmitOp_MemBase_Rn5: - if (!checkMemBase(rmRel->as())) + if (!check_mem_base(rm_rel->as())) { goto InvalidAddress; + } - opcode.addReg(rmRel->as().baseId(), 5); + opcode.add_reg(rm_rel->as().base_id(), 5); goto EmitOp; EmitOp_MemBaseNoImm_Rn5: - if (!checkMemBase(rmRel->as()) || rmRel->as().hasIndex()) + if (!check_mem_base(rm_rel->as()) || rm_rel->as().has_index()) { goto InvalidAddress; + } - if (rmRel->as().hasOffset()) + if (rm_rel->as().has_offset()) { goto InvalidDisplacement; + } - opcode.addReg(rmRel->as().baseId(), 5); + opcode.add_reg(rm_rel->as().base_id(), 5); goto EmitOp; EmitOp_MemBaseIndex_Rn5_Rm16: - if (!rmRel->as().hasBaseReg()) + if (!rm_rel->as().has_base_reg()) { goto InvalidAddress; + } - if (rmRel->as().indexId() > 30 && rmRel->as().indexId() != Gp::kIdZr) + if (rm_rel->as().index_id() > 30 && rm_rel->as().index_id() != Gp::kIdZr) { goto InvalidPhysId; + } - opcode.addReg(rmRel->as().indexId(), 16); - opcode.addReg(rmRel->as().baseId(), 5); + opcode.add_reg(rm_rel->as().index_id(), 16); + opcode.add_reg(rm_rel->as().base_id(), 5); goto EmitOp; // 
-------------------------------------------------------------------------- @@ -4979,70 +5086,75 @@ Error Assembler::_emit(InstId instId, const Operand_& o0, const Operand_& o1, co EmitOp_Rel: { - if (rmRel->isLabel() || rmRel->isMem()) { - uint32_t labelId; - int64_t labelOffset = 0; + if (rm_rel->is_label() || rm_rel->is_mem()) { + uint32_t label_id; + int64_t label_offset = 0; - if (rmRel->isLabel()) { - labelId = rmRel->as